diff --git a/gdb-14.1-add-support-for-SW64-001.patch b/gdb-14.1-add-support-for-SW64-001.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2bd260b9a4b8dec6a182c4f33d3f6ab03d56605f
--- /dev/null
+++ b/gdb-14.1-add-support-for-SW64-001.patch
@@ -0,0 +1,3700 @@
+diff -Naur gdb-14.1-after-patch/gdb/arch/sw64.c gdb-14.1-sw64/gdb/arch/sw64.c
+--- gdb-14.1-after-patch/gdb/arch/sw64.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/arch/sw64.c 2025-03-03 10:59:13.070000000 +0800
+@@ -0,0 +1,104 @@
++/* Copyright (C) 2022-2023 Free Software Foundation, Inc.
++
++ This file is part of GDB.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
++
++#include "gdbsupport/common-defs.h"
++#include "sw64.h"
++#include <stdlib.h>
++#include <unordered_map>
++
++/* Target description features. */
++
++#include "../features/sw64/cpu.c"
++#include "../features/sw64/fpu.c"
++
++#ifndef GDBSERVER
++#define STATIC_IN_GDB static
++#else
++#define STATIC_IN_GDB
++#endif
++
++STATIC_IN_GDB target_desc_up
++sw64_create_target_description (const struct sw64_gdbarch_features features)
++{
++ /* Now we should create a new target description. */
++ target_desc_up tdesc = allocate_target_description ();
++
++ std::string arch_name = "sw64";
++
++ if (features.xlen == 4)
++ arch_name.append ("32");
++ else if (features.xlen == 8)
++ arch_name.append ("64");
++
++ if (features.fputype == SINGLE_FLOAT)
++ arch_name.append ("f");
++ else if (features.fputype == DOUBLE_FLOAT)
++ arch_name.append ("d");
++
++ set_tdesc_architecture (tdesc.get (), arch_name.c_str ());
++
++ long regnum = 0;
++
++ /* For now we only support creating 32-bit or 64-bit x-registers. */
++ if (features.xlen == 8)
++ regnum = create_feature_sw64_base64 (tdesc.get (), regnum);
++
++ /* For now we only support creating single float and double float. */
++ regnum = create_feature_sw64_fpu (tdesc.get (), regnum);
++
++ return tdesc;
++}
++
++#ifndef GDBSERVER
++
++/* Wrapper used by std::unordered_map to generate hash for feature set. */
++struct sw64_gdbarch_features_hasher
++{
++ std::size_t
++ operator() (const sw64_gdbarch_features &features) const noexcept
++ {
++ return features.hash ();
++ }
++};
++
++/* Cache of previously seen target descriptions, indexed by the feature set
++ that created them. */
++static std::unordered_map<sw64_gdbarch_features, target_desc_up,
++ sw64_gdbarch_features_hasher> sw64_tdesc_cache;
++
++const target_desc *
++sw64_lookup_target_description (const struct sw64_gdbarch_features features)
++{
++ /* Lookup in the cache. If we find it then return the pointer out of
++ the target_desc_up (which is a unique_ptr). This is safe as the
++ sw64_tdesc_cache will exist until GDB exits. */
++ const auto it = sw64_tdesc_cache.find (features);
++ if (it != sw64_tdesc_cache.end ())
++ return it->second.get ();
++
++ target_desc_up tdesc (sw64_create_target_description (features));
++
++ /* Add to the cache, and return a pointer borrowed from the
++ target_desc_up. This is safe as the cache (and the pointers
++ contained within it) are not deleted until GDB exits. */
++ target_desc *ptr = tdesc.get ();
++ sw64_tdesc_cache.emplace (features, std::move (tdesc));
++ return ptr;
++}
++
++#endif /* !GDBSERVER */
+diff -Naur gdb-14.1-after-patch/gdb/arch/sw64.h gdb-14.1-sw64/gdb/arch/sw64.h
+--- gdb-14.1-after-patch/gdb/arch/sw64.h 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/arch/sw64.h 2025-03-03 10:59:13.070000000 +0800
+@@ -0,0 +1,118 @@
++/* Common target-dependent functionality for SW64
++
++ Copyright (C) 2022-2023 Free Software Foundation, Inc.
++
++ This file is part of GDB.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
++
++#ifndef ARCH_SW64_H
++#define ARCH_SW64_H
++
++#include "gdbsupport/tdesc.h"
++
++/* Register numbers of various important registers. */
++enum sw64_regnum
++{
++ LOONGARCH_RA_REGNUM = 1, /* Return Address. */
++ LOONGARCH_SP_REGNUM = 3, /* Stack Pointer. */
++ LOONGARCH_A0_REGNUM = 4, /* First Argument/Return Value. */
++ LOONGARCH_A7_REGNUM = 11, /* Seventh Argument/Syscall Number. */
++ LOONGARCH_FP_REGNUM = 22, /* Frame Pointer. */
++ LOONGARCH_ORIG_A0_REGNUM = 32, /* Syscall's original arg0. */
++ LOONGARCH_PC_REGNUM = 33, /* Program Counter. */
++ LOONGARCH_BADV_REGNUM = 34, /* Bad Vaddr for Addressing Exception. */
++ LOONGARCH_LINUX_NUM_GREGSET = 45, /* 32 GPR, ORIG_A0, PC, BADV, RESERVED 10. */
++ LOONGARCH_ARG_REGNUM = 8, /* r4-r11: general-purpose argument registers.
++ f0-f7: floating-point argument registers. */
++ LOONGARCH_FIRST_FP_REGNUM = LOONGARCH_LINUX_NUM_GREGSET,
++ LOONGARCH_LINUX_NUM_FPREGSET = 32,
++ LOONGARCH_FIRST_FCC_REGNUM = LOONGARCH_FIRST_FP_REGNUM + LOONGARCH_LINUX_NUM_FPREGSET,
++ LOONGARCH_LINUX_NUM_FCC = 8,
++ LOONGARCH_FCSR_REGNUM = LOONGARCH_FIRST_FCC_REGNUM + LOONGARCH_LINUX_NUM_FCC,
++};
++
++enum sw64_fputype
++{
++ SINGLE_FLOAT = 1,
++ DOUBLE_FLOAT = 2,
++};
++
++/* The set of SW64 architectural features that we track that impact how
++ we configure the actual gdbarch instance. We hold one of these in the
++ gdbarch_tdep structure, and use it to distinguish between different
++ SW64 gdbarch instances.
++
++ The information in here ideally comes from the target description,
++ however, if the target doesn't provide a target description then we will
++ create a default target description by first populating one of these
++ based on what we know about the binary being executed, and using that to
++ drive default target description creation. */
++
++struct sw64_gdbarch_features
++{
++ /* The size of the x-registers in bytes. This is either 4 (sw6432)
++ or 8 (sw6464). No other value is valid. Initialise to the invalid
++ 0 value so we can spot if one of these is used uninitialised. */
++ int xlen = 0;
++
++ /* The type of floating-point. This is either 1 (single float) or 2
++ (double float). No other value is valid. Initialise to the invalid
++ 0 value so we can spot if one of these is used uninitialised. */
++ int fputype = 0;
++
++ /* Equality operator. */
++ bool operator== (const struct sw64_gdbarch_features &rhs) const
++ {
++ return (xlen == rhs.xlen && fputype == rhs.fputype);
++ }
++
++ /* Inequality operator. */
++ bool operator!= (const struct sw64_gdbarch_features &rhs) const
++ {
++ return !((*this) == rhs);
++ }
++
++ /* Used by std::unordered_map to hash feature sets. */
++ std::size_t hash () const noexcept
++ {
++ std::size_t val = (xlen & 0x1f) << 5;
++ return val;
++ }
++};
++
++#ifdef GDBSERVER
++
++/* Create and return a target description that is compatible with FEATURES.
++ This is only used directly from the gdbserver where the created target
++ description is modified after it is return. */
++
++target_desc_up sw64_create_target_description
++ (const struct sw64_gdbarch_features features);
++
++#else
++
++/* Lookup an already existing target description matching FEATURES, or
++ create a new target description if this is the first time we have seen
++ FEATURES. For the same FEATURES the same target_desc is always
++ returned. This is important when trying to lookup gdbarch objects as
++ GDBARCH_LIST_LOOKUP_BY_INFO performs a pointer comparison on target
++ descriptions to find candidate gdbarch objects. */
++
++const target_desc *sw64_lookup_target_description
++ (const struct sw64_gdbarch_features features);
++
++#endif /* GDBSERVER */
++
++#endif /* ARCH_SW64_H */
+diff -Naur gdb-14.1-after-patch/gdb/configure.host gdb-14.1-sw64/gdb/configure.host
+--- gdb-14.1-after-patch/gdb/configure.host 2023-02-02 12:45:52.000000000 +0800
++++ gdb-14.1-sw64/gdb/configure.host 2025-03-03 10:59:13.080000000 +0800
+@@ -65,6 +65,7 @@
+ sparcv9 | sparc64) gdb_host_cpu=sparc ;;
+ s390*) gdb_host_cpu=s390 ;;
+ sh*) gdb_host_cpu=sh ;;
++sw64*) gdb_host_cpu=sw64 ;;
+ tilegx*) gdb_host_cpu=tilegx ;;
+ x86_64*) gdb_host_cpu=i386 ;;
+ m32r*) gdb_host_cpu=m32r ;;
+@@ -168,6 +169,8 @@
+ gdb_host=sol2
+ ;;
+
++sw64*-linux*) gdb_host=linux ;;
++
+ tilegx-*-linux*) gdb_host=linux ;;
+
+ vax-*-netbsd* | vax-*-knetbsd*-gnu)
+diff -Naur gdb-14.1-after-patch/gdb/configure.nat gdb-14.1-sw64/gdb/configure.nat
+--- gdb-14.1-after-patch/gdb/configure.nat 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/gdb/configure.nat 2025-03-03 10:59:13.080000000 +0800
+@@ -304,6 +304,11 @@
+ # Host: GNU/Linux SPARC
+ NATDEPFILES="${NATDEPFILES} sparc-nat.o sparc-linux-nat.o"
+ ;;
++ sw64)
++ # Host: SW64, running GNU/Linux.
++ NATDEPFILES="${NATDEPFILES} sw64-linux-nat.o linux-nat-trad.o \
++ nat/sw64-linux-watch.o"
++ ;;
+ tilegx)
+ # Host: Tilera TILE-Gx running GNU/Linux.
+ NATDEPFILES="${NATDEPFILES} tilegx-linux-nat.o"
+diff -Naur gdb-14.1-after-patch/gdb/configure.tgt gdb-14.1-sw64/gdb/configure.tgt
+--- gdb-14.1-after-patch/gdb/configure.tgt 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/gdb/configure.tgt 2025-03-03 10:59:13.080000000 +0800
+@@ -108,6 +108,11 @@
+ cpu_obs="riscv-tdep.o riscv-none-tdep.o arch/riscv.o \
+ ravenscar-thread.o riscv-ravenscar-thread.o";;
+
++sw64*-*-*)
++ # Target: SW64
++ cpu_obs="sw64-tdep.o arch/sw64.o"
++ ;;
++
+ x86_64-*-*)
+ cpu_obs="${i386_tobjs} ${amd64_tobjs}";;
+
+@@ -539,6 +544,12 @@
+ linux-tdep.o linux-record.o symfile-mem.o"
+ ;;
+
++sw64*-*-linux*)
++ # Target: SW64 running Linux
++ gdb_target_obs="sw64-linux-tdep.o glibc-tdep.o \
++ linux-tdep.o solib-svr4.o linux-record.o"
++ ;;
++
+ riscv*-*-freebsd*)
+ # Target: FreeBSD/riscv
+ gdb_target_obs="riscv-fbsd-tdep.o"
+diff -Naur gdb-14.1-after-patch/gdb/data-directory/Makefile.in gdb-14.1-sw64/gdb/data-directory/Makefile.in
+--- gdb-14.1-after-patch/gdb/data-directory/Makefile.in 2023-12-03 13:23:54.000000000 +0800
++++ gdb-14.1-sw64/gdb/data-directory/Makefile.in 2025-03-03 10:59:13.090000000 +0800
+@@ -58,6 +58,7 @@
+ mips-o32-linux.xml \
+ ppc-linux.xml \
+ ppc64-linux.xml \
++ sw64-linux.xml \
+ s390-linux.xml \
+ s390x-linux.xml \
+ sparc-linux.xml \
+diff -Naur gdb-14.1-after-patch/gdb/features/sw64/cpu.c gdb-14.1-sw64/gdb/features/sw64/cpu.c
+--- gdb-14.1-after-patch/gdb/features/sw64/cpu.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/features/sw64/cpu.c 2025-03-03 10:59:13.130000000 +0800
+@@ -0,0 +1,48 @@
++/* THIS FILE IS GENERATED. -*- buffer-read-only: t -*- vi:set ro:
++ Original: base64.xml */
++
++#include "gdbsupport/tdesc.h"
++
++static int
++create_feature_sw64_base64 (struct target_desc *result, long regnum)
++{
++ struct tdesc_feature *feature;
++
++ feature = tdesc_create_feature (result, "org.gnu.gdb.sw64.base");
++ tdesc_create_reg (feature, "r0", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r1", regnum++, 1, "general", 64, "code_ptr");
++ tdesc_create_reg (feature, "r2", regnum++, 1, "general", 64, "data_ptr");
++ tdesc_create_reg (feature, "r3", regnum++, 1, "general", 64, "data_ptr");
++ tdesc_create_reg (feature, "r4", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r5", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r6", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r7", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r8", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r9", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r10", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r11", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r12", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r13", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r14", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r15", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r16", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r17", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r18", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r19", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r20", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r21", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r22", regnum++, 1, "general", 64, "data_ptr");
++ tdesc_create_reg (feature, "r23", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r24", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r25", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r26", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r27", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r28", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r29", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r30", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "r31", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "orig_a0", regnum++, 1, "general", 64, "uint64");
++ tdesc_create_reg (feature, "pc", regnum++, 1, "general", 64, "code_ptr");
++ tdesc_create_reg (feature, "badv", regnum++, 1, "general", 64, "code_ptr");
++ return regnum;
++}
+diff -Naur gdb-14.1-after-patch/gdb/features/sw64/cpu.xml gdb-14.1-sw64/gdb/features/sw64/cpu.xml
+--- gdb-14.1-after-patch/gdb/features/sw64/cpu.xml 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/features/sw64/cpu.xml 2025-03-03 10:59:13.130000000 +0800
+@@ -0,0 +1,45 @@
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
+diff -Naur gdb-14.1-after-patch/gdb/features/sw64/fpu.c gdb-14.1-sw64/gdb/features/sw64/fpu.c
+--- gdb-14.1-after-patch/gdb/features/sw64/fpu.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/features/sw64/fpu.c 2025-03-03 10:59:13.130000000 +0800
+@@ -0,0 +1,62 @@
++/* THIS FILE IS GENERATED. -*- buffer-read-only: t -*- vi:set ro:
++ Original: fpu.xml */
++
++#include "gdbsupport/tdesc.h"
++
++static int
++create_feature_sw64_fpu (struct target_desc *result, long regnum)
++{
++ struct tdesc_feature *feature;
++
++ feature = tdesc_create_feature (result, "org.gnu.gdb.sw64.fpu");
++ tdesc_type_with_fields *type_with_fields;
++ type_with_fields = tdesc_create_union (feature, "fputype");
++ tdesc_type *field_type;
++ field_type = tdesc_named_type (feature, "ieee_single");
++ tdesc_add_field (type_with_fields, "f", field_type);
++ field_type = tdesc_named_type (feature, "ieee_double");
++ tdesc_add_field (type_with_fields, "d", field_type);
++
++ tdesc_create_reg (feature, "f0", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f1", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f2", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f3", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f4", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f5", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f6", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f7", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f8", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f9", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f10", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f11", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f12", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f13", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f14", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f15", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f16", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f17", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f18", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f19", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f20", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f21", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f22", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f23", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f24", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f25", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f26", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f27", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f28", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f29", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f30", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "f31", regnum++, 1, "float", 64, "fputype");
++ tdesc_create_reg (feature, "fcc0", regnum++, 1, "float", 8, "uint8");
++ tdesc_create_reg (feature, "fcc1", regnum++, 1, "float", 8, "uint8");
++ tdesc_create_reg (feature, "fcc2", regnum++, 1, "float", 8, "uint8");
++ tdesc_create_reg (feature, "fcc3", regnum++, 1, "float", 8, "uint8");
++ tdesc_create_reg (feature, "fcc4", regnum++, 1, "float", 8, "uint8");
++ tdesc_create_reg (feature, "fcc5", regnum++, 1, "float", 8, "uint8");
++ tdesc_create_reg (feature, "fcc6", regnum++, 1, "float", 8, "uint8");
++ tdesc_create_reg (feature, "fcc7", regnum++, 1, "float", 8, "uint8");
++ tdesc_create_reg (feature, "fcsr", regnum++, 1, "float", 32, "uint32");
++ return regnum;
++}
+diff -Naur gdb-14.1-after-patch/gdb/features/sw64/fpu.xml gdb-14.1-sw64/gdb/features/sw64/fpu.xml
+--- gdb-14.1-after-patch/gdb/features/sw64/fpu.xml 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/features/sw64/fpu.xml 2025-03-03 10:59:13.130000000 +0800
+@@ -0,0 +1,57 @@
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
+diff -Naur gdb-14.1-after-patch/gdb/features/sw64-cpu.c gdb-14.1-sw64/gdb/features/sw64-cpu.c
+--- gdb-14.1-after-patch/gdb/features/sw64-cpu.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/features/sw64-cpu.c 2025-03-03 10:59:13.130000000 +0800
+@@ -0,0 +1,47 @@
++/* THIS FILE IS GENERATED. -*- buffer-read-only: t -*- vi:set ro:
++ Original: sw64-cpu.xml */
++
++#include "gdbsupport/tdesc.h"
++
++static int
++create_feature_sw64_cpu (struct target_desc *result, long regnum)
++{
++ struct tdesc_feature *feature;
++
++ feature = tdesc_create_feature (result, "org.gnu.gdb.sw64.cpu");
++ tdesc_create_reg (feature, "r0", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r1", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r2", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r3", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r4", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r5", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r6", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r7", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r8", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r9", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r10", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r11", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r12", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r13", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r14", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "fp", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r16", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r17", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r18", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r19", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r20", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r21", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r22", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r23", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r24", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r25", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ra", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r27", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r28", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r29", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "sp", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r31", regnum++, 1, NULL, 64, "int");
++ regnum = 64;
++ tdesc_create_reg (feature, "pc", regnum++, 1, NULL, 64, "int");
++ return regnum;
++}
+diff -Naur gdb-14.1-after-patch/gdb/features/sw64-cpu.xml gdb-14.1-sw64/gdb/features/sw64-cpu.xml
+--- gdb-14.1-after-patch/gdb/features/sw64-cpu.xml 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/features/sw64-cpu.xml 2025-03-03 10:59:13.130000000 +0800
+@@ -0,0 +1,44 @@
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
+diff -Naur gdb-14.1-after-patch/gdb/features/sw64-efu.c gdb-14.1-sw64/gdb/features/sw64-efu.c
+--- gdb-14.1-after-patch/gdb/features/sw64-efu.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/features/sw64-efu.c 2025-03-03 10:59:13.130000000 +0800
+@@ -0,0 +1,110 @@
++/* THIS FILE IS GENERATED. -*- buffer-read-only: t -*- vi:set ro:
++ Original: sw64-efu.xml */
++
++#include "gdbsupport/tdesc.h"
++
++static int
++create_feature_sw64_efu (struct target_desc *result, long regnum)
++{
++ struct tdesc_feature *feature;
++
++ feature = tdesc_create_feature (result, "org.gnu.gdb.sw64.efu");
++ regnum = 67;
++ tdesc_create_reg (feature, "ef0", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef1", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef2", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef3", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef4", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef5", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef6", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef7", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef8", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef9", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef10", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef11", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef12", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef13", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef14", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef15", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef16", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef17", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef18", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef19", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef20", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef21", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef22", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef23", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef24", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef25", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef26", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef27", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef28", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef29", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef30", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef31", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef0", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef1", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef2", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef3", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef4", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef5", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef6", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef7", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef8", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef9", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef10", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef11", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef12", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef13", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef14", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef15", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef16", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef17", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef18", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef19", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef20", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef21", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef22", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef23", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef24", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef25", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef26", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef27", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef28", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef29", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef30", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef31", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef0", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef1", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef2", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef3", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef4", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef5", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef6", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef7", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef8", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef9", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef10", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef11", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef12", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef13", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef14", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef15", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef16", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef17", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef18", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef19", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef20", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef21", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef22", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef23", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef24", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef25", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef26", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef27", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef28", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef29", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef30", regnum++, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ef31", regnum++, 1, NULL, 64, "int");
++ return regnum;
++}
+diff -Naur gdb-14.1-after-patch/gdb/features/sw64-efu.xml gdb-14.1-sw64/gdb/features/sw64-efu.xml
+--- gdb-14.1-after-patch/gdb/features/sw64-efu.xml 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/features/sw64-efu.xml 2025-03-03 10:59:13.130000000 +0800
+@@ -0,0 +1,106 @@
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
+diff -Naur gdb-14.1-after-patch/gdb/features/sw64-fpu.c gdb-14.1-sw64/gdb/features/sw64-fpu.c
+--- gdb-14.1-after-patch/gdb/features/sw64-fpu.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/features/sw64-fpu.c 2025-03-03 10:59:13.130000000 +0800
+@@ -0,0 +1,46 @@
++/* THIS FILE IS GENERATED. -*- buffer-read-only: t -*- vi:set ro:
++ Original: sw64-fpu.xml */
++
++#include "gdbsupport/tdesc.h"
++
++static int
++create_feature_sw64_fpu (struct target_desc *result, long regnum)
++{
++ struct tdesc_feature *feature;
++
++ feature = tdesc_create_feature (result, "org.gnu.gdb.sw64.fpu");
++ regnum = 32;
++ tdesc_create_reg (feature, "f0", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f1", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f2", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f3", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f4", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f5", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f6", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f7", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f8", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f9", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f10", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f11", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f12", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f13", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f14", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f15", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f16", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f17", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f18", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f19", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f20", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f21", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f22", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f23", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f24", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f25", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f26", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f27", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f28", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f29", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f30", regnum++, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "fcsr", regnum++, 1, "float", 64, "int");
++ return regnum;
++}
+diff -Naur gdb-14.1-after-patch/gdb/features/sw64-fpu.xml gdb-14.1-sw64/gdb/features/sw64-fpu.xml
+--- gdb-14.1-after-patch/gdb/features/sw64-fpu.xml 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/features/sw64-fpu.xml 2025-03-03 10:59:13.130000000 +0800
+@@ -0,0 +1,42 @@
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
+diff -Naur gdb-14.1-after-patch/gdb/features/sw64-linux.c gdb-14.1-sw64/gdb/features/sw64-linux.c
+--- gdb-14.1-after-patch/gdb/features/sw64-linux.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/features/sw64-linux.c 2025-03-03 10:59:13.130000000 +0800
+@@ -0,0 +1,129 @@
++/* THIS FILE IS GENERATED. -*- buffer-read-only: t -*- vi:set ro:
++ Original: sw64-linux.xml */
++
++#include "defs.h"
++#include "osabi.h"
++#include "target-descriptions.h"
++
++const struct target_desc *tdesc_sw64_linux;
++static void
++initialize_tdesc_sw64_linux (void)
++{
++ target_desc_up result = allocate_target_description ();
++ set_tdesc_architecture (result.get (), bfd_scan_arch ("sw64"));
++
++ struct tdesc_feature *feature;
++
++ feature = tdesc_create_feature (result.get (), "org.gnu.gdb.sw64.cpu");
++ tdesc_create_reg (feature, "r0", 0, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r1", 1, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r2", 2, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r3", 3, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r4", 4, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r5", 5, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r6", 6, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r7", 7, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r8", 8, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r9", 9, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r10", 10, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r11", 11, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r12", 12, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r13", 13, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r14", 14, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "fp", 15, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r16", 16, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r17", 17, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r18", 18, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r19", 19, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r20", 20, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r21", 21, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r22", 22, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r23", 23, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r24", 24, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r25", 25, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "ra", 26, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r27", 27, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r28", 28, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r29", 29, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "sp", 30, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "r31", 31, 1, NULL, 64, "int");
++ tdesc_create_reg (feature, "pc", 64, 1, NULL, 64, "int");
++
++ feature = tdesc_create_feature (result.get (), "org.gnu.gdb.sw64.fpu");
++ tdesc_create_reg (feature, "f0", 32, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f1", 33, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f2", 34, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f3", 35, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f4", 36, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f5", 37, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f6", 38, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f7", 39, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f8", 40, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f9", 41, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f10", 42, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f11", 43, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f12", 44, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f13", 45, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f14", 46, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f15", 47, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f16", 48, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f17", 49, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f18", 50, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f19", 51, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f20", 52, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f21", 53, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f22", 54, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f23", 55, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f24", 56, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f25", 57, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f26", 58, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f27", 59, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f28", 60, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f29", 61, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "f30", 62, 1, NULL, 64, "ieee_double");
++ tdesc_create_reg (feature, "fcsr", 63, 1, "float", 64, "int");
++
++ feature = tdesc_create_feature (result.get (), "org.gnu.gdb.sw64.vec");
++ tdesc_create_reg (feature, "V0", 167, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V1", 168, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V2", 169, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V3", 170, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V4", 171, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V5", 172, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V6", 173, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V7", 174, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V8", 175, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V9", 176, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V10", 177, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V11", 178, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V12", 179, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V13", 180, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V14", 181, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V15", 182, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V16", 183, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V17", 184, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V18", 185, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V19", 186, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V20", 187, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V21", 188, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V22", 189, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V23", 190, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V24", 191, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V25", 192, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V26", 193, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V27", 194, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V28", 195, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V29", 196, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V30", 197, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V31", 198, 1, NULL, 512, "int");
++
++ feature = tdesc_create_feature (result.get (), "org.gnu.gdb.sw64.linux");
++ tdesc_create_reg (feature, "", 65, 1, "system", 64, "int");
++ tdesc_create_reg (feature, "unique", 66, 1, "system", 64, "int");
++ tdesc_create_reg (feature, "da_match", 163, 1, "system", 64, "int");
++ tdesc_create_reg (feature, "da_mask", 164, 1, "system", 64, "int");
++ tdesc_create_reg (feature, "dv_match", 165, 1, "system", 64, "int");
++ tdesc_create_reg (feature, "dv_mask", 166, 1, "system", 64, "int");
++
++ tdesc_sw64_linux = result.release ();
++}
+diff -Naur gdb-14.1-after-patch/gdb/features/sw64-linux.xml gdb-14.1-sw64/gdb/features/sw64-linux.xml
+--- gdb-14.1-after-patch/gdb/features/sw64-linux.xml 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/features/sw64-linux.xml 2025-03-03 10:59:13.130000000 +0800
+@@ -0,0 +1,18 @@
++
++
++
++
++
++ sw64
++
++
++
++
++
++
++
++
+diff -Naur gdb-14.1-after-patch/gdb/features/sw64-vec.c gdb-14.1-sw64/gdb/features/sw64-vec.c
+--- gdb-14.1-after-patch/gdb/features/sw64-vec.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/features/sw64-vec.c 2025-03-03 10:59:13.130000000 +0800
+@@ -0,0 +1,46 @@
++/* THIS FILE IS GENERATED. -*- buffer-read-only: t -*- vi:set ro:
++ Original: sw64-vec.xml */
++
++#include "gdbsupport/tdesc.h"
++
++static int
++create_feature_sw64_vec (struct target_desc *result, long regnum)
++{
++ struct tdesc_feature *feature;
++
++ feature = tdesc_create_feature (result, "org.gnu.gdb.sw64.vec");
++ regnum = 167;
++ tdesc_create_reg (feature, "V0", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V1", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V2", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V3", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V4", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V5", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V6", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V7", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V8", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V9", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V10", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V11", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V12", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V13", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V14", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V15", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V16", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V17", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V18", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V19", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V20", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V21", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V22", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V23", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V24", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V25", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V26", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V27", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V28", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V29", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V30", regnum++, 1, NULL, 512, "int");
++ tdesc_create_reg (feature, "V31", regnum++, 1, NULL, 512, "int");
++ return regnum;
++}
+diff -Naur gdb-14.1-after-patch/gdb/features/sw64-vec.xml gdb-14.1-sw64/gdb/features/sw64-vec.xml
+--- gdb-14.1-after-patch/gdb/features/sw64-vec.xml 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/features/sw64-vec.xml 2025-03-03 10:59:13.130000000 +0800
+@@ -0,0 +1,43 @@
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
+diff -Naur gdb-14.1-after-patch/gdb/linux-nat.c gdb-14.1-sw64/gdb/linux-nat.c
+--- gdb-14.1-after-patch/gdb/linux-nat.c 2023-12-03 13:23:54.000000000 +0800
++++ gdb-14.1-sw64/gdb/linux-nat.c 2025-03-03 10:59:13.150000000 +0800
+@@ -237,7 +237,11 @@
+ static struct lwp_info *add_lwp (ptid_t ptid);
+ static void purge_lwp_list (int pid);
+ static void delete_lwp (ptid_t ptid);
++#ifdef __sw_64__
++struct lwp_info *find_lwp_pid (ptid_t ptid);
++#else
+ static struct lwp_info *find_lwp_pid (ptid_t ptid);
++#endif
+
+ static int lwp_status_pending_p (struct lwp_info *lp);
+
+@@ -840,7 +844,11 @@
+ /* Return a pointer to the structure describing the LWP corresponding
+ to PID. If no corresponding LWP could be found, return NULL. */
+
++#ifdef __sw_64__
++struct lwp_info *
++#else
+ static struct lwp_info *
++#endif
+ find_lwp_pid (ptid_t ptid)
+ {
+ int lwp;
+diff -Naur gdb-14.1-after-patch/gdb/Makefile.in gdb-14.1-sw64/gdb/Makefile.in
+--- gdb-14.1-after-patch/gdb/Makefile.in 2025-03-03 09:43:44.070000000 +0800
++++ gdb-14.1-sw64/gdb/Makefile.in 2025-03-03 10:59:13.660000000 +0800
+@@ -737,6 +737,8 @@
+ ia64-vms-tdep.o \
+ loongarch-linux-tdep.o \
+ loongarch-tdep.o \
++ sw64-linux-tdep.o \
++ sw64-tdep.o \
+ mips-fbsd-tdep.o \
+ mips-linux-tdep.o \
+ mips-netbsd-tdep.o \
+@@ -1394,6 +1396,7 @@
+ linux-tdep.h \
+ location.h \
+ loongarch-tdep.h \
++ sw64-tdep.h \
+ m2-lang.h \
+ m32r-tdep.h \
+ m68k-tdep.h \
+@@ -1745,6 +1748,9 @@
+ loongarch-linux-nat.c \
+ loongarch-linux-tdep.c \
+ loongarch-tdep.c \
++ sw64-linux-nat.c \
++ sw64-linux-tdep.c \
++ sw64-tdep.c \
+ m32r-linux-nat.c \
+ m32r-linux-tdep.c \
+ m32r-tdep.c \
+diff -Naur gdb-14.1-after-patch/gdb/nat/sw64-linux-watch.c gdb-14.1-sw64/gdb/nat/sw64-linux-watch.c
+--- gdb-14.1-after-patch/gdb/nat/sw64-linux-watch.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/nat/sw64-linux-watch.c 2025-03-03 10:59:13.160000000 +0800
+@@ -0,0 +1,182 @@
++/* Copyright (C) 2009-2023 Free Software Foundation, Inc.
++
++ This file is part of GDB.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program. If not, see . */
++
++#include "gdbsupport/common-defs.h"
++#include "nat/gdb_ptrace.h"
++#include "sw64-linux-watch.h"
++
++#ifdef GDBSERVER
++extern int debug_threads;
++#define debug(format, ... ) do { \
++ if (debug_threads) {\
++ fprintf(stdout, "%s,%d:%s:", __FILE__, __LINE__,__func__); \
++ fprintf(stdout, format, ##__VA_ARGS__); \
++ fprintf(stdout, "\n"); \
++ fflush(stdout); \
++ } \
++ } while (0)
++extern int delete_gdb_breakpoint (char z_type, CORE_ADDR addr, int kind);
++#else
++extern int debug_infrun;
++#define debug(format, ... ) do { \
++ if (debug_infrun){ \
++ fprintf(stdout, "%s,%d:%s:", __FILE__, __LINE__,__func__); \
++ fprintf(stdout, format, ##__VA_ARGS__); \
++ fprintf(stdout, "\n"); \
++ fflush(stdout); \
++ } \
++ } while (0)
++#endif
++
++int read_debug_register (pid_t pid, int regno, long *val)
++{
++ int ret = 0;
++ errno = 0;
++ //printf("%s: pid = %d, regno = %d *val = %d\n", __FUNCTION__, pid, regno, *val);
++ if (SPE_LWP(pid))
++ ret = ptrace (PT_READ_U, pid, (PTRACE_TYPE_ARG3) regno, val);
++ else
++ *val = ptrace (PT_READ_U, pid, (PTRACE_TYPE_ARG3) regno, 0);
++ //printf("%s: ret = %d, *val = %d\n", __FUNCTION__, ret, *val);
++ if ( ret < 0 || errno != 0)
++ {
++ warning("lwp %x write $%d failed \n", pid, regno);
++ return 0;
++ }
++ return 1;
++}
++
++int store_debug_register (pid_t pid, int regno, long val)
++{
++ int ret;
++ errno = 0;
++ //printf("%s: pid = %d, regno = %d val = %d\n", __FUNCTION__, pid, regno, val);
++ //debug("store tid %x CSR %d, val = %#lx", pid, regno, val);
++ ret = ptrace (PT_WRITE_U, pid, (PTRACE_TYPE_ARG3) regno, val);
++ //printf("%s: ret = %d\n", __FUNCTION__, ret);
++ if ( ret < 0 || errno != 0)
++ {
++ warning("lwp %x write $%d failed \n", pid, regno);
++ return 0;
++ }
++ return 1;
++}
++
++int is_power_of_2 (int val)
++{
++ int i, onecount;
++
++ onecount = 0;
++ for (i = 0; i < 8 * sizeof (val); i++)
++ if (val & (1 << i))
++ onecount++;
++
++ return onecount <= 1;
++}
++
++int sw64_read_insn(pid_t pid, ulong pc)
++{
++ int insn;
++ long tmp, data;
++
++ tmp = pc & ~0x7;
++ data = ptrace(PTRACE_PEEKTEXT, pid, tmp, 0);
++ if ( pc & 0x7)
++ insn = (int)(data>>32);
++ else
++ insn = (int)data;
++ return insn;
++}
++
++int sw64_linux_try_one_watch (pid_t lwpid, struct arch_lwp_info *info,
++ enum sw64_hw_bp_type wpt_type, long addr, int len)
++{
++ long data;
++ int found = 0;
++ struct pt_watch_regs *wpt = info->wpt;
++
++ if (!addr || !is_power_of_2 (len))
++ return 0;
++ if ( wpt_type == sw64_vstore)
++ {
++ debug("insert master wp %lx, dv_match len = %d", (long)addr, len);
++
++ info->value_address = addr; //saved
++ wpt[1].match = ptrace(PTRACE_PEEKDATA, lwpid, addr, 0); //dv_match
++ wpt[1].mask = (len &0x8)?0xffffffffffffffffUL:0xffffffffUL; //dv_mask
++ wpt[1].match &= wpt[1].mask;
++ wpt[1].valid = 1;
++
++ if (wpt[0].valid)
++ {
++ data = wpt[0].match & ((1L<<53)-1);
++
++ wpt[0].match = sw64_write;
++ wpt->match <<= 53;
++ wpt->match |= data;
++ /* wpt->mask not changed */
++ }
++ }
++ else
++ {
++ debug("insert master wp %lx, da_match len = %d", (long)addr, len);
++ wpt->match = wpt_type&0x3;
++ wpt->match <<= 53;
++ wpt->match |= addr & ((1L<<53)-1);
++ data = len -1;
++ wpt->mask = ~data & ((1L<<53)-1);
++ wpt->valid = 1;
++ }
++
++ found = 1;
++
++ if (!found)
++ error("hardware wpt resource empty\n");
++ return 1;
++}
++
++
++int sw64_linux_del_one_watch (pid_t lwpid, struct arch_lwp_info *info,
++ enum sw64_hw_bp_type wpt_type, long addr, int len)
++{
++ int deleted_one = 0;
++ long match;
++ struct pt_watch_regs *wpt = info->wpt;
++
++ if ( wpt_type == sw64_vstore )
++ {
++ wpt[1].valid = 0;
++ wpt[0].valid = 0;
++ // debug("to remove master wp %#lx, dv_match", (long)addr);
++ info->value_address = 0L; //clr the saved
++ return 1;
++ }
++ match = wpt_type&0x3;
++ match <<= 53;
++ match |= addr&((1L<<53)-1);
++
++ if ( wpt->valid && (match == wpt->match ))
++ {
++ wpt->match &= ~(0x3L<<53);
++ wpt->valid = 0;
++ // deleted_one ++;
++ // debug("to remove master wp %#lx, da_match", (long)addr);
++ return 1;
++ }
++
++ return deleted_one>0;
++}
+diff -Naur gdb-14.1-after-patch/gdb/nat/sw64-linux-watch.h gdb-14.1-sw64/gdb/nat/sw64-linux-watch.h
+--- gdb-14.1-after-patch/gdb/nat/sw64-linux-watch.h 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/nat/sw64-linux-watch.h 2025-03-03 10:59:13.160000000 +0800
+@@ -0,0 +1,121 @@
++/* Copyright (C) 2009-2023 Free Software Foundation, Inc.
++
++ This file is part of GDB.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program. If not, see . */
++
++#ifndef NAT_SW64_LINUX_WATCH_H
++#define NAT_SW64_LINUX_WATCH_H
++
++#include
++#include "gdbsupport/break-common.h"
++
++#if 0
++extern int debug_infrun;
++#define debug(format, ... ) do { \
++ if (debug_infrun){ \
++ fprintf(stdout, "%s,%d:%s:", __FILE__, __LINE__,__func__); \
++ fprintf(stdout, format, ##__VA_ARGS__); \
++ fprintf(stdout, "\n"); \
++ fflush(stdout); \
++ } \
++ } while (0)
++#endif
++
++#define SPE_LWP(pid) (pid & (1 << 31))
++
++enum {
++ REG_R0 = 0,
++ REG_SP = 30,
++ REG_F0 = 32,
++ REG_FPCR = 63,
++ REG_PC = 64,
++ UNIQUE_ADDR =65,
++ REG_V0F1 = 67,
++ SPE_V0 = 70,
++ REG_V0F2 = 99,
++ REG_V0F3 = 131,
++ M_DA_MATCH = 163,
++ M_DA_MASK = 164,
++ M_DV_MATCH = 165,
++ M_DV_MASK = 166,
++ M_DC_CTL = 167,
++#ifdef SW8A
++ M_MATCH_CTL = 167,
++#endif
++// Liu Hanxu: refer to linux-stable-sw/arch/sw64/include/uapi/asm/ptrace.h
++};
++
++enum sw64_hw_bp_type
++{
++ sw64_none = 0, /* not match, or Execute HW breakpoint */
++ sw64_read = 1, /* Read HW watchpoint */
++ sw64_write = 2, /* Common HW watchpoint */
++ sw64_access = 3, /* Access HW watchpoint */
++ sw64_vstore = 4,
++};
++
++/* A value of zero in a watchlo indicates that it is available. */
++
++struct pt_watch_regs
++{
++ uint64_t match;
++ uint64_t mask;
++ int valid;
++} __attribute__ ((aligned (8)));
++
++
++/* Per-thread arch-specific data we want to keep. */
++#define MAX_DA_MATECH 2
++struct arch_lwp_info
++{
++ /* Non-zero if our copy differs from what's recorded in the thread. */
++ int watch_registers_changed;
++ int watch_matched;
++
++ struct pt_watch_regs wpt[MAX_DA_MATECH];
++
++ /* Cached stopped data address. */
++ CORE_ADDR stopped_data_address;
++ CORE_ADDR value_address;
++ /* tls cs */
++ long ebxio_def0;
++};
++
++
++/* We keep list of all watchpoints we should install and calculate the
++ watch register values each time the list changes. This allows for
++ easy sharing of watch registers for more than one watchpoint. */
++
++struct sw64_watchpoint
++{
++ CORE_ADDR addr;
++ int len;
++ enum target_hw_bp_type type;
++ struct sw64_watchpoint *next;
++};
++
++int is_power_of_2 (int val);
++
++int sw64_linux_try_one_watch (pid_t , struct arch_lwp_info *, enum sw64_hw_bp_type , long , int );
++
++int sw64_linux_del_one_watch (pid_t , struct arch_lwp_info *, enum sw64_hw_bp_type , long , int );
++
++int read_debug_register (pid_t pid, int regno, long *val);
++
++int store_debug_register (pid_t pid, int regno, long val);
++
++int sw64_read_insn (pid_t pid, ulong pc);
++
++#endif /* NAT_SW64_LINUX_WATCH_H */
+diff -Naur gdb-14.1-after-patch/gdb/regformats/sw64-linux.dat gdb-14.1-sw64/gdb/regformats/sw64-linux.dat
+--- gdb-14.1-after-patch/gdb/regformats/sw64-linux.dat 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/regformats/sw64-linux.dat 2025-03-03 10:59:13.190000000 +0800
+@@ -0,0 +1,168 @@
++# THIS FILE IS GENERATED. -*- buffer-read-only: t -*- vi :set ro:
++# Generated from: sw64-linux.xml
++name:sw64_linux
++xmltarget:sw64-linux.xml
++expedite:
++64:r0
++64:r1
++64:r2
++64:r3
++64:r4
++64:r5
++64:r6
++64:r7
++64:r8
++64:r9
++64:r10
++64:r11
++64:r12
++64:r13
++64:r14
++64:fp
++64:r16
++64:r17
++64:r18
++64:r19
++64:r20
++64:r21
++64:r22
++64:r23
++64:r24
++64:r25
++64:ra
++64:r27
++64:r28
++64:r29
++64:sp
++64:r31
++64:f0
++64:f1
++64:f2
++64:f3
++64:f4
++64:f5
++64:f6
++64:f7
++64:f8
++64:f9
++64:f10
++64:f11
++64:f12
++64:f13
++64:f14
++64:f15
++64:f16
++64:f17
++64:f18
++64:f19
++64:f20
++64:f21
++64:f22
++64:f23
++64:f24
++64:f25
++64:f26
++64:f27
++64:f28
++64:f29
++64:f30
++64:fcsr
++64:pc
++64:
++64:unique
++64:ef0
++64:ef1
++64:ef2
++64:ef3
++64:ef4
++64:ef5
++64:ef6
++64:ef7
++64:ef8
++64:ef9
++64:ef10
++64:ef11
++64:ef12
++64:ef13
++64:ef14
++64:ef15
++64:ef16
++64:ef17
++64:ef18
++64:ef19
++64:ef20
++64:ef21
++64:ef22
++64:ef23
++64:ef24
++64:ef25
++64:ef26
++64:ef27
++64:ef28
++64:ef29
++64:ef30
++64:ef31
++64:ef0
++64:ef1
++64:ef2
++64:ef3
++64:ef4
++64:ef5
++64:ef6
++64:ef7
++64:ef8
++64:ef9
++64:ef10
++64:ef11
++64:ef12
++64:ef13
++64:ef14
++64:ef15
++64:ef16
++64:ef17
++64:ef18
++64:ef19
++64:ef20
++64:ef21
++64:ef22
++64:ef23
++64:ef24
++64:ef25
++64:ef26
++64:ef27
++64:ef28
++64:ef29
++64:ef30
++64:ef31
++64:ef0
++64:ef1
++64:ef2
++64:ef3
++64:ef4
++64:ef5
++64:ef6
++64:ef7
++64:ef8
++64:ef9
++64:ef10
++64:ef11
++64:ef12
++64:ef13
++64:ef14
++64:ef15
++64:ef16
++64:ef17
++64:ef18
++64:ef19
++64:ef20
++64:ef21
++64:ef22
++64:ef23
++64:ef24
++64:ef25
++64:ef26
++64:ef27
++64:ef28
++64:ef29
++64:ef30
++64:ef31
+diff -Naur gdb-14.1-after-patch/gdb/sw64-linux-nat.c gdb-14.1-sw64/gdb/sw64-linux-nat.c
+--- gdb-14.1-after-patch/gdb/sw64-linux-nat.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/sw64-linux-nat.c 2025-03-03 10:59:13.210000000 +0800
+@@ -0,0 +1,466 @@
++/* Low level SW64 GNU/Linux interface, for GDB when running native.
++ Copyright (C) 2005-2023 Free Software Foundation, Inc.
++
++ This file is part of GDB.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program. If not, see . */
++
++#include "defs.h"
++#include "target.h"
++#include "regcache.h"
++#include "linux-nat-trad.h"
++
++#include "sw64-tdep.h"
++#include "gdbarch.h"
++
++#include "nat/gdb_ptrace.h"
++#include
++
++#include
++#include "gregset.h"
++
++#include "nat/sw64-linux-watch.h"
++#include "sw64-linux-tdep.h"
++#include "inferior.h" // defs of inferior_ptid
++#include "nat/linux-ptrace.h" // defs of TRAP_HWBKPT
++#include "linux-nat.h" // defs of lwp_info
++
++/* The address of UNIQUE for ptrace. */
++#define SW64_UNIQUE_PTRACE_ADDR 65
++
++#ifndef LHX20240711
++static void sw64_linux_new_thread (struct lwp_info *lwp);
++
++/* Function to call when a thread is being deleted. */
++void sw64_linux_delete_thread (struct arch_lwp_info *arch_lwp);
++
++//void sw64_linux_prepare_to_resume (struct lwp_info *lwp);
++
++extern struct lwp_info *find_lwp_pid (ptid_t);
++
++enum sw64_hw_bp_type sw64_hw_bp_type_from_target_hw_bp_type (enum target_hw_bp_type);
++#endif
++
++class sw64_linux_nat_target final : public linux_nat_trad_target
++{
++#ifndef LHX20240710
++public:
++ void close () override;
++
++ // LHX: Support for hardware watchpoint
++ int can_use_hw_breakpoint (enum bptype, int, int) override;
++
++ int remove_watchpoint (CORE_ADDR, int, enum target_hw_bp_type,
++ struct expression *) override;
++
++ int insert_watchpoint (CORE_ADDR, int, enum target_hw_bp_type,
++ struct expression *) override;
++
++ bool stopped_by_watchpoint () override;
++
++ bool stopped_data_address (CORE_ADDR *) override;
++
++ // LHX: Not implemented.
++ //int region_ok_for_hw_watchpoint (CORE_ADDR, int) override;
++
++ /* LHX: Used for watch registers. */
++ void low_new_thread (struct lwp_info *lp) override
++ { sw64_linux_new_thread (lp); }
++
++ void low_delete_thread (struct arch_lwp_info *lp) override
++ { sw64_linux_delete_thread (lp); }
++
++ void low_prepare_to_resume (struct lwp_info *lp) override;
++ //{ sw64_linux_prepare_to_resume (lp); }
++#endif
++
++ const struct target_desc *read_description () override;
++
++protected:
++ /* Override linux_nat_trad_target methods. */
++ CORE_ADDR register_u_offset (struct gdbarch *gdbarch,
++ int regno, int store_p) override;
++};
++
++static sw64_linux_nat_target the_sw64_linux_nat_target;
++
++/* See the comment in m68k-tdep.c regarding the utility of these
++ functions. */
++
++void
++supply_gregset (struct regcache *regcache, const gdb_gregset_t *gregsetp)
++{
++ const long *regp = (const long *)gregsetp;
++
++ /* PC is in slot 32, UNIQUE is in slot 33. */
++ sw64_supply_int_regs (regcache, -1, regp, regp + 31, regp + 32);
++}
++
++void
++fill_gregset (const struct regcache *regcache,
++ gdb_gregset_t *gregsetp, int regno)
++{
++ long *regp = (long *)gregsetp;
++
++ /* PC is in slot 32, UNIQUE is in slot 33. */
++ sw64_fill_int_regs (regcache, regno, regp, regp + 31, regp + 32);
++}
++
++/* Now we do the same thing for floating-point registers.
++ Again, see the comments in m68k-tdep.c. */
++
++void
++supply_fpregset (struct regcache *regcache, const gdb_fpregset_t *fpregsetp)
++{
++ const long *regp = (const long *)fpregsetp;
++
++ /* FPCR is in slot 32. */
++ sw64_supply_fp_regs (regcache, -1, regp, regp + 31);
++}
++
++void
++fill_fpregset (const struct regcache *regcache,
++ gdb_fpregset_t *fpregsetp, int regno)
++{
++ long *regp = (long *)fpregsetp;
++
++ /* FPCR is in slot 32. */
++ sw64_fill_fp_regs (regcache, regno, regp, regp + 31);
++}
++
++CORE_ADDR
++sw64_linux_nat_target::register_u_offset (struct gdbarch *gdbarch,
++ int regno, int store_p)
++{
++ if (regno == gdbarch_pc_regnum (gdbarch))
++ return SW64_PC_REGNUM;
++ if (regno == SW64_UNIQUE_REGNUM)
++ return SW64_UNIQUE_PTRACE_ADDR;
++ if (regno < gdbarch_fp0_regnum (gdbarch))
++ return GPR_BASE + regno;
++ else
++ return FPR_BASE + regno - gdbarch_fp0_regnum (gdbarch);
++}
++
++const struct target_desc *
++sw64_linux_nat_target::read_description ()
++{
++ return tdesc_sw64_linux;
++}
++
++#ifndef LHX20240710
++/* Target to_can_use_hw_breakpoint implementation. Return 1 if we can
++ handle the specified watch type. */
++
++int
++sw64_linux_nat_target::can_use_hw_breakpoint (enum bptype type,
++ int cnt, int ot)
++{
++ if (type == bp_hardware_breakpoint &&
++ cnt > 2 )
++ return -1;
++ return 1;
++}
++
++/* Target to_stopped_by_watchpoint implementation. Return 1 if
++ stopped by watchpoint. The watchhi R and W bits indicate the watch
++ register triggered. */
++
++bool
++sw64_linux_nat_target::stopped_by_watchpoint ()
++{
++ pid_t lwpid = inferior_ptid.lwp ();//ptid_get_lwp(inferior_ptid);
++ struct lwp_info *lwp = find_lwp_pid (inferior_ptid);
++ siginfo_t siginfo;
++
++ gdb_assert (lwp != NULL);
++
++ if (SPE_LWP(lwpid))
++ {
++ //debug("keep going after slave-wp, can\'t ptrace.");
++ return true;
++ }
++
++ /* Retrieve siginfo. */
++ errno = 0;
++ ptrace (PTRACE_GETSIGINFO, lwpid, 0, &siginfo);
++ if (errno != 0)
++ {
++ warning("%s:%d GETSIGINFO return %d\n", __FILE__, __LINE__, errno);
++ return false;
++ }
++ //debug("si_code=%#x si_signo=%d si_errno = %x pc=%#lx, data_address %#lx",
++ //siginfo.si_code, siginfo.si_signo,siginfo.si_errno,(long)siginfo.si_value.sival_ptr, (long)siginfo.si_addr);
++ /* This must be a hardware breakpoint. */
++ if (siginfo.si_signo != SIGTRAP
++ || (siginfo.si_code & 0xffff) != TRAP_HWBKPT)
++ return false;
++
++ /* siginfo should return the accessed data address, not pc */
++ switch (siginfo.si_errno){
++ case 1: //si_errno[0]:
++ case 5:
++ case 7: //si_errno[2]:
++ lwp->arch_private->stopped_data_address
++ = (CORE_ADDR) (uintptr_t) (lwp->arch_private->wpt->match & ((1L<<53)-1));
++ lwp->arch_private->watch_matched = 1;
++ break;
++ case 2: //si_errno[`]
++ lwp->arch_private->stopped_data_address
++ = (CORE_ADDR) (uintptr_t) (lwp->arch_private->value_address); // get the saved
++ lwp->arch_private->watch_matched = 1;
++ break;
++ default:
++ ;;
++ }
++ return true;
++}
++
++/* Target to_stopped_data_address implementation. Set the address
++ where the watch triggered (if known). Return 1 if the address was
++ known. */
++
++bool
++sw64_linux_nat_target::stopped_data_address (CORE_ADDR *paddr)
++{
++ struct lwp_info *lwp = find_lwp_pid (inferior_ptid);
++
++ gdb_assert (lwp != NULL);
++ gdb_assert (lwp->arch_private != NULL);
++ if (lwp->arch_private->watch_matched)
++ *paddr = lwp->arch_private->stopped_data_address;
++ return lwp->arch_private->watch_matched;
++}
++
++#if 0 // lhx: This function is not implemented.
++/* Target to_region_ok_for_hw_watchpoint implementation. Return 1 if
++ the specified region can be covered by the watch registers. */
++
++int
++sw64_linux_nat_target::region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
++{
++ /* Can not set watchpoints for zero or negative lengths. */
++ if (len <= 0)
++ return 0;
++
++ /* The current ptrace interface can only handle watchpoints that are a
++ * power of 2. */
++ if ((len & (len - 1)) != 0)
++ return 0;
++
++ /* All tests passed so we must be able to set a watchpoint. */
++ return 1;
++}
++#endif
++
++enum sw64_hw_bp_type
++sw64_hw_bp_type_from_target_hw_bp_type (enum target_hw_bp_type raw_type)
++{
++ switch (raw_type)
++ {
++ case hw_execute:
++ /* permit r/w */
++ return sw64_none;
++ case hw_write:
++ return sw64_write;
++ case hw_read:
++ return sw64_read;
++ case hw_access:
++ return sw64_access;
++ case hw_vstore:
++ return sw64_vstore;
++ default:
++ error ( "bad raw breakpoint type %d", (int) raw_type);
++ }
++}
++
++/* Handle thread creation. We need to copy the breakpoints and watchpoints
++ * in the parent thread to the child thread. */
++
++static void
++sw64_linux_new_thread (struct lwp_info *lp)
++{
++ struct arch_lwp_info *info = XCNEW (struct arch_lwp_info);
++
++ /* Mark that all the hardware breakpoint/watchpoint register pairs
++ * for this thread need to be initialized. */
++
++ lp->arch_private = info;
++}
++
++void
++sw64_linux_delete_thread (struct arch_lwp_info *arch_lwp)
++{
++ xfree (arch_lwp);
++}
++
++/* Target to_insert_watchpoint implementation. Try to insert a new
++ watch. Return zero on success. */
++
++int
++sw64_linux_nat_target::insert_watchpoint (CORE_ADDR addr, int len,
++ enum target_hw_bp_type type,
++ struct expression *cond)
++{
++ enum sw64_hw_bp_type watch_type;
++ struct lwp_info *lp = find_lwp_pid(inferior_ptid);
++ struct arch_lwp_info *priv;
++
++ //debug("%s insert wpt at %#lx type %d", target_pid_to_str(inferior_ptid),
++ // addr, type);
++ if ( !lp->arch_private )
++ sw64_linux_new_thread(lp);
++ priv = lp->arch_private;
++
++ watch_type = sw64_hw_bp_type_from_target_hw_bp_type(type);
++
++ if (sw64_linux_try_one_watch(inferior_ptid.lwp (),/*ptid_get_lwp(inferior_ptid),*/ priv,
++ watch_type,addr,len))
++ {
++ priv->watch_registers_changed =1;
++ return 0;
++ }
++ return -1;
++}
++
++/* Target to_remove_watchpoint implementation. Try to remove a watch.
++ Return zero on success. */
++
++int
++sw64_linux_nat_target::remove_watchpoint (CORE_ADDR addr, int len,
++ enum target_hw_bp_type type,
++ struct expression *cond)
++{
++ enum sw64_hw_bp_type watch_type;
++ struct lwp_info *lp = find_lwp_pid(inferior_ptid);
++ pid_t lwp = inferior_ptid.lwp ();/*ptid_get_lwp (inferior_ptid);*/
++ struct arch_lwp_info *priv = lp->arch_private;
++
++ watch_type = sw64_hw_bp_type_from_target_hw_bp_type(type);
++ if (sw64_linux_del_one_watch(lwp, priv, watch_type,addr,len))
++ {
++ priv->watch_registers_changed =1;
++ return 0;
++ }
++ return -1;
++}
++
++/* Target to_close implementation. Free any watches and call the
++ super implementation. */
++
++void
++sw64_linux_nat_target::close ()
++{
++ linux_nat_trad_target::close ();
++}
++
++/* Called when resuming a thread.
++ The hardware debug registers are updated when there is any change. */
++
++void
++sw64_linux_nat_target::low_prepare_to_resume (struct lwp_info *lwp)
++{
++ int lwpid, i;
++ struct arch_lwp_info *priv = lwp->arch_private;
++
++ //ptid_t ptid = ptid_of_lwp (lwp);
++ lwpid = lwp->ptid.lwp();//ptid_get_pid (ptid);
++ /* priv's NULL means this is the main thread still going through the shell,
++ * or, no watchpoint has been set yet. In that case, there's
++ * nothing to do. */
++ if ( priv && priv->watch_registers_changed)
++ {
++ /* Only update the watch registers if we have set or unset a
++ watchpoint already. */
++ /* when detach, all of priv->....valid should be zero */
++ if ( priv->wpt[1].valid )
++ {
++ // debug("write master dv_match %#lx, mask %#lx", priv->wpt[1].match, priv->wpt[1].mask);
++ store_debug_register (lwpid, M_DV_MATCH, priv->wpt[1].match);
++ store_debug_register (lwpid, M_DV_MASK, priv->wpt[1].mask);
++ }
++ if ( priv->wpt[0].valid )
++ {
++ // debug("write master da_match %#lx, mask %#lx", priv->wpt->match, priv->wpt->mask);
++ store_debug_register (lwpid, M_DA_MATCH, priv->wpt->match);
++ store_debug_register (lwpid, M_DA_MASK, priv->wpt->mask);
++ }
++
++ i = (priv->wpt[1].valid<<1) | priv->wpt[0].valid;
++
++ // setting dv_ctl
++ /*
++ *pcb->match_ctl:
++ * [0] DA_MATCH
++ * [1] DV_MATCH
++ * [2] DAV_MATCH
++ * [3] IA_MATCH
++ * [4] IV_MATCH
++ * [5] IDA_MATCH
++ * [8:9] match_ctl_mode
++ * [0:0]: not match
++ * [0:1]: match when read addr
++ * [1:0]: match when write addr
++ * [1:1]: match when read & write addr
++ *
++ * #define DA_MATCH 0x1
++ * #define DV_MATCH 0x2
++ * #define DAV_MATCH 0x4
++ * #define IA_MATCH 0x8
++ * #define IV_MATCH 0x10
++ * #define IDA_MATCH 0x20
++ */
++
++ switch (i)
++ {
++ //da_match
++ case 0:
++ case 1:
++ //store_debug_register (lwpid, M_DV_MATCH+2, 0L);
++#ifdef SW8A
++ store_debug_register (lwpid, M_MATCH_CTL, 0x301);
++#endif
++ break;
++ //dv_match
++ case 2:
++ store_debug_register (lwpid, M_DC_CTL, 1);
++#ifdef SW8A
++ store_debug_register (lwpid, M_MATCH_CTL, 0x302);
++#endif
++ break;
++ //dva_match
++ case 3:
++ store_debug_register (lwpid, M_DC_CTL, 3);
++#ifdef SW8A
++ store_debug_register (lwpid, M_MATCH_CTL, 0x304);
++#endif
++ break;
++ default:
++ ;;
++ }
++
++ priv->watch_registers_changed = 0;
++ priv->watch_matched = 0;
++ }
++}
++#endif
++
++void _initialize_sw64_linux_nat ();
++
++void
++_initialize_sw64_linux_nat ()
++{
++ linux_target = &the_sw64_linux_nat_target;
++ add_inf_child_target (&the_sw64_linux_nat_target);
++}
+diff -Naur gdb-14.1-after-patch/gdb/sw64-linux-tdep.c gdb-14.1-sw64/gdb/sw64-linux-tdep.c
+--- gdb-14.1-after-patch/gdb/sw64-linux-tdep.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/sw64-linux-tdep.c 2025-03-03 10:59:13.660000000 +0800
+@@ -0,0 +1,1434 @@
++/* Target-dependent code for GNU/Linux on SW64.
++ Copyright (C) 2002-2023 Free Software Foundation, Inc.
++
++ This file is part of GDB.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program. If not, see <http://www.gnu.org/licenses/>. */
++
++#include "defs.h"
++#include "frame.h"
++#include "osabi.h"
++#include "solib-svr4.h"
++#include "symtab.h"
++#include "regset.h"
++#include "regcache.h"
++#include "linux-tdep.h"
++#include "sw64-tdep.h"
++#include "gdbarch.h"
++
++#include "features/sw64-linux.c"
++
++#ifndef LHX20240711_catch
++#include "xml-syscall.h"
++#endif
++
++#ifndef LHX20240716_record
++#include "glibc-tdep.h"
++#include "sw64-linux-tdep.h"
++#include "tramp-frame.h"
++#include "trad-frame.h"
++#include "target/target.h"
++
++#include "stap-probe.h"
++#include "parser-defs.h"
++#include "user-regs.h"
++#include
++
++#include "record-full.h"
++#include "linux-record.h"
++#endif
++
++/* This enum represents the signals' numbers on the SW64
++ architecture. It just contains the signal definitions which are
++ different from the generic implementation.
++
++ It is derived from the file <arch/sw64/include/uapi/asm/signal.h>,
++ from the Linux kernel tree. */
++
++enum
++ {
++ /* SIGABRT is the same as in the generic implementation, but is
++ defined here because SIGIOT depends on it. */
++ SW64_LINUX_SIGABRT = 6,
++ SW64_LINUX_SIGEMT = 7,
++ SW64_LINUX_SIGBUS = 10,
++ SW64_LINUX_SIGSYS = 12,
++ SW64_LINUX_SIGURG = 16,
++ SW64_LINUX_SIGSTOP = 17,
++ SW64_LINUX_SIGTSTP = 18,
++ SW64_LINUX_SIGCONT = 19,
++ SW64_LINUX_SIGCHLD = 20,
++ SW64_LINUX_SIGIO = 23,
++ SW64_LINUX_SIGINFO = 29,
++ SW64_LINUX_SIGUSR1 = 30,
++ SW64_LINUX_SIGUSR2 = 31,
++ SW64_LINUX_SIGPOLL = SW64_LINUX_SIGIO,
++ SW64_LINUX_SIGPWR = SW64_LINUX_SIGINFO,
++ SW64_LINUX_SIGIOT = SW64_LINUX_SIGABRT,
++ };
++
++/* Under GNU/Linux, signal handler invocations can be identified by
++ the designated code sequence that is used to return from a signal
++ handler. In particular, the return address of a signal handler
++ points to a sequence that copies $sp to $16, loads $0 with the
++ appropriate syscall number, and finally enters the kernel.
++
++ This is somewhat complicated in that:
++ (1) the expansion of the "mov" assembler macro has changed over
++ time, from "bis src,src,dst" to "bis zero,src,dst",
++ (2) the kernel has changed from using "addq" to "lda" to load the
++ syscall number,
++ (3) there is a "normal" sigreturn and an "rt" sigreturn which
++ has a different stack layout. */
++
++static long
++sw64_linux_sigtramp_offset_1 (struct gdbarch *gdbarch, CORE_ADDR pc)
++{
++ switch (sw64_read_insn (gdbarch, pc))
++ {
++ case 0x47de0410: /* bis $30,$30,$16 */
++ case 0x43fe0750: /* bis $31,$30,$16 */
++ return 0;
++
++ case 0x43ecf400: /* addq $31,103,$0 */
++#if 0
++ case 0x201f0067: /* lda $0,103($31) */
++ case 0x201f015f: /* lda $0,351($31) */
++#else
++ case 0xf81f0067U:
++ case 0xf81f015fU:
++#endif
++ return 4;
++
++ case 0x02000083: /* call_pal callsys */
++ return 8;
++
++ default:
++ return -1;
++ }
++}
++
++static LONGEST
++sw64_linux_sigtramp_offset (struct gdbarch *gdbarch, CORE_ADDR pc)
++{
++ long i, off;
++
++ if (pc & 3)
++ return -1;
++
++ /* Guess where we might be in the sequence. */
++ off = sw64_linux_sigtramp_offset_1 (gdbarch, pc);
++ if (off < 0)
++ return -1;
++
++ /* Verify that the other two insns of the sequence are as we expect. */
++ pc -= off;
++ for (i = 0; i < 12; i += 4)
++ {
++ if (i == off)
++ continue;
++ if (sw64_linux_sigtramp_offset_1 (gdbarch, pc + i) != i)
++ return -1;
++ }
++
++ return off;
++}
++
++static int
++sw64_linux_pc_in_sigtramp (struct gdbarch *gdbarch,
++ CORE_ADDR pc, const char *func_name)
++{
++ return sw64_linux_sigtramp_offset (gdbarch, pc) >= 0;
++}
++
++static CORE_ADDR
++sw64_linux_sigcontext_addr (frame_info_ptr this_frame)
++{
++ struct gdbarch *gdbarch = get_frame_arch (this_frame);
++ CORE_ADDR pc;
++ ULONGEST sp;
++ long off;
++
++ pc = get_frame_pc (this_frame);
++ sp = get_frame_register_unsigned (this_frame, SW64_SP_REGNUM);
++
++ off = sw64_linux_sigtramp_offset (gdbarch, pc);
++ gdb_assert (off >= 0);
++
++ /* __NR_rt_sigreturn has a couple of structures on the stack. This is:
++
++ struct rt_sigframe {
++ struct siginfo info;
++ struct ucontext uc;
++ };
++
++ offsetof (struct rt_sigframe, uc.uc_mcontext); */
++
++ if (sw64_read_insn (gdbarch, pc - off + 4) == 0xf81f015fU)
++ return sp + 176;
++
++ /* __NR_sigreturn has the sigcontext structure at the top of the stack. */
++ return sp;
++}
++
++/* Supply register REGNUM from the buffer specified by GREGS and LEN
++ in the general-purpose register set REGSET to register cache
++ REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
++
++static void
++sw64_linux_supply_gregset (const struct regset *regset,
++ struct regcache *regcache,
++ int regnum, const void *gregs, size_t len)
++{
++ const gdb_byte *regs = (const gdb_byte *) gregs;
++
++ gdb_assert (len >= 32 * 8);
++ sw64_supply_int_regs (regcache, regnum, regs, regs + 31 * 8,
++ len >= 33 * 8 ? regs + 32 * 8 : NULL);
++}
++
++/* Collect register REGNUM from the register cache REGCACHE and store
++ it in the buffer specified by GREGS and LEN as described by the
++ general-purpose register set REGSET. If REGNUM is -1, do this for
++ all registers in REGSET. */
++
++static void
++sw64_linux_collect_gregset (const struct regset *regset,
++ const struct regcache *regcache,
++ int regnum, void *gregs, size_t len)
++{
++ gdb_byte *regs = (gdb_byte *) gregs;
++
++ gdb_assert (len >= 32 * 8);
++ sw64_fill_int_regs (regcache, regnum, regs, regs + 31 * 8,
++ len >= 33 * 8 ? regs + 32 * 8 : NULL);
++}
++
++/* Supply register REGNUM from the buffer specified by FPREGS and LEN
++ in the floating-point register set REGSET to register cache
++ REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
++
++static void
++sw64_linux_supply_fpregset (const struct regset *regset,
++ struct regcache *regcache,
++ int regnum, const void *fpregs, size_t len)
++{
++ const gdb_byte *regs = (const gdb_byte *) fpregs;
++
++ gdb_assert (len >= 32 * 8);
++ sw64_supply_fp_regs (regcache, regnum, regs, regs + 31 * 8);
++}
++
++/* Collect register REGNUM from the register cache REGCACHE and store
++ it in the buffer specified by FPREGS and LEN as described by the
++ general-purpose register set REGSET. If REGNUM is -1, do this for
++ all registers in REGSET. */
++
++static void
++sw64_linux_collect_fpregset (const struct regset *regset,
++ const struct regcache *regcache,
++ int regnum, void *fpregs, size_t len)
++{
++ gdb_byte *regs = (gdb_byte *) fpregs;
++
++ gdb_assert (len >= 32 * 8);
++ sw64_fill_fp_regs (regcache, regnum, regs, regs + 31 * 8);
++}
++
++static const struct regset sw64_linux_gregset =
++{
++ NULL,
++ sw64_linux_supply_gregset, sw64_linux_collect_gregset
++};
++
++static const struct regset sw64_linux_fpregset =
++{
++ NULL,
++ sw64_linux_supply_fpregset, sw64_linux_collect_fpregset
++};
++
++/* Iterate over core file register note sections. */
++
++static void
++sw64_linux_iterate_over_regset_sections (struct gdbarch *gdbarch,
++ iterate_over_regset_sections_cb *cb,
++ void *cb_data,
++ const struct regcache *regcache)
++{
++ cb (".reg", SW64_LINUX_SIZEOF_GREGSET, SW64_LINUX_SIZEOF_GREGSET, &sw64_linux_gregset, NULL, cb_data);
++ cb (".reg2", SW64_LINUX_SIZEOF_FPREGSET, SW64_LINUX_SIZEOF_FPREGSET, &sw64_linux_fpregset, NULL, cb_data);
++}
++
++/* Implementation of `gdbarch_gdb_signal_from_target', as defined in
++ gdbarch.h. */
++
++static enum gdb_signal
++sw64_linux_gdb_signal_from_target (struct gdbarch *gdbarch,
++ int signal)
++{
++ switch (signal)
++ {
++ case SW64_LINUX_SIGEMT:
++ return GDB_SIGNAL_EMT;
++
++ case SW64_LINUX_SIGBUS:
++ return GDB_SIGNAL_BUS;
++
++ case SW64_LINUX_SIGSYS:
++ return GDB_SIGNAL_SYS;
++
++ case SW64_LINUX_SIGURG:
++ return GDB_SIGNAL_URG;
++
++ case SW64_LINUX_SIGSTOP:
++ return GDB_SIGNAL_STOP;
++
++ case SW64_LINUX_SIGTSTP:
++ return GDB_SIGNAL_TSTP;
++
++ case SW64_LINUX_SIGCONT:
++ return GDB_SIGNAL_CONT;
++
++ case SW64_LINUX_SIGCHLD:
++ return GDB_SIGNAL_CHLD;
++
++ /* No way to differentiate between SIGIO and SIGPOLL.
++ Therefore, we just handle the first one. */
++ case SW64_LINUX_SIGIO:
++ return GDB_SIGNAL_IO;
++
++ /* No way to differentiate between SIGINFO and SIGPWR.
++ Therefore, we just handle the first one. */
++ case SW64_LINUX_SIGINFO:
++ return GDB_SIGNAL_INFO;
++
++ case SW64_LINUX_SIGUSR1:
++ return GDB_SIGNAL_USR1;
++
++ case SW64_LINUX_SIGUSR2:
++ return GDB_SIGNAL_USR2;
++ }
++
++ return linux_gdb_signal_from_target (gdbarch, signal);
++}
++
++/* Implementation of `gdbarch_gdb_signal_to_target', as defined in
++ gdbarch.h. */
++
++static int
++sw64_linux_gdb_signal_to_target (struct gdbarch *gdbarch,
++ enum gdb_signal signal)
++{
++ switch (signal)
++ {
++ case GDB_SIGNAL_EMT:
++ return SW64_LINUX_SIGEMT;
++
++ case GDB_SIGNAL_BUS:
++ return SW64_LINUX_SIGBUS;
++
++ case GDB_SIGNAL_SYS:
++ return SW64_LINUX_SIGSYS;
++
++ case GDB_SIGNAL_URG:
++ return SW64_LINUX_SIGURG;
++
++ case GDB_SIGNAL_STOP:
++ return SW64_LINUX_SIGSTOP;
++
++ case GDB_SIGNAL_TSTP:
++ return SW64_LINUX_SIGTSTP;
++
++ case GDB_SIGNAL_CONT:
++ return SW64_LINUX_SIGCONT;
++
++ case GDB_SIGNAL_CHLD:
++ return SW64_LINUX_SIGCHLD;
++
++ case GDB_SIGNAL_IO:
++ return SW64_LINUX_SIGIO;
++
++ case GDB_SIGNAL_INFO:
++ return SW64_LINUX_SIGINFO;
++
++ case GDB_SIGNAL_USR1:
++ return SW64_LINUX_SIGUSR1;
++
++ case GDB_SIGNAL_USR2:
++ return SW64_LINUX_SIGUSR2;
++
++ case GDB_SIGNAL_POLL:
++ return SW64_LINUX_SIGPOLL;
++
++ case GDB_SIGNAL_PWR:
++ return SW64_LINUX_SIGPWR;
++ }
++
++ return linux_gdb_signal_to_target (gdbarch, signal);
++}
++
++#ifndef LHX20240711_catch
++/* Return the current system call's number present in the
++ v0 register. When the function fails, it returns -1. */
++
++static LONGEST
++sw64_linux_get_syscall_number (struct gdbarch *gdbarch,
++ thread_info *thread)
++{
++ struct regcache *regcache = get_thread_regcache (thread);
++ //sw64_gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
++ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
++ int regsize = register_size (gdbarch, SW64_V0_REGNUM);
++ /* The content of a register */
++ gdb_byte buf[8];
++ /* The result */
++ LONGEST ret;
++
++ gdb_assert (regsize <= sizeof (buf));
++
++ /* Getting the system call number from the register.
++ syscall number is in v0 or $0. */
++ regcache->cooked_read (SW64_V0_REGNUM, buf);
++
++ ret = extract_signed_integer (buf, regsize, byte_order);
++
++ return ret;
++}
++#endif
++
++#ifndef LHX20240716_record
++/* Initialize linux_record_tdep if not initialized yet.
++ WORDSIZE is 4 or 8 for 32- or 64-bit SW64 Linux respectively.
++ Sizes of data structures are initialized accordingly. */
++
++static void
++sw64_init_linux_record_tdep (struct linux_record_tdep *record_tdep,
++ int wordsize)
++
++{
++ /* Simply return if it had been initialized. */
++ if (record_tdep->size_pointer != 0)
++ return;
++
++ /* These values are the size of the type that will be used in a system
++ call. They are obtained from Linux Kernel source. */
++
++ if (wordsize == 8)
++ {
++ record_tdep->size_pointer = 8;
++ record_tdep->size__old_kernel_stat = 32;
++ record_tdep->size_tms = 32;
++ record_tdep->size_loff_t = 8;
++ record_tdep->size_flock = 32;
++ record_tdep->size_oldold_utsname = 45;
++ record_tdep->size_ustat = 32;
++ record_tdep->size_old_sigaction = 32;
++ record_tdep->size_old_sigset_t = 8;
++ record_tdep->size_rlimit = 16;
++ record_tdep->size_rusage = 144;
++ record_tdep->size_timeval = 16;
++ record_tdep->size_timezone = 8;
++ record_tdep->size_old_gid_t = 4;
++ record_tdep->size_old_uid_t = 4;
++ record_tdep->size_fd_set = 128;
++ record_tdep->size_old_dirent = 280;
++ record_tdep->size_statfs = 120;
++ record_tdep->size_statfs64 = 120;
++ record_tdep->size_sockaddr = 16;
++ record_tdep->size_int = 4;
++ record_tdep->size_long = 8;
++ record_tdep->size_ulong = 8;
++ record_tdep->size_msghdr = 56;
++ record_tdep->size_itimerval = 32;
++ record_tdep->size_stat = 144;
++ record_tdep->size_old_utsname = 325;
++ record_tdep->size_sysinfo = 112;
++ record_tdep->size_msqid_ds = 120;
++ record_tdep->size_shmid_ds = 112;
++ record_tdep->size_new_utsname = 390;
++ record_tdep->size_timex = 208;
++ record_tdep->size_mem_dqinfo = 24;
++ record_tdep->size_if_dqblk = 72;
++ record_tdep->size_fs_quota_stat = 80;
++ record_tdep->size_timespec = 16;
++ record_tdep->size_pollfd = 8;
++ record_tdep->size_NFS_FHSIZE = 32;
++ record_tdep->size_knfsd_fh = 132;
++ record_tdep->size_TASK_COMM_LEN = 16;
++ record_tdep->size_sigaction = 32;
++ record_tdep->size_sigset_t = 8;
++ record_tdep->size_siginfo_t = 128;
++ record_tdep->size_cap_user_data_t = 8;
++ record_tdep->size_stack_t = 24;
++ record_tdep->size_off_t = 8;
++ record_tdep->size_stat64 = 104;
++ record_tdep->size_gid_t = 4;
++ record_tdep->size_uid_t = 4;
++ record_tdep->size_PAGE_SIZE = 0x10000; /* 64KB */
++ record_tdep->size_flock64 = 32;
++ record_tdep->size_io_event = 32;
++ record_tdep->size_iocb = 64;
++ record_tdep->size_epoll_event = 16;
++ record_tdep->size_itimerspec = 32;
++ record_tdep->size_mq_attr = 64;
++ record_tdep->size_termios = 44;
++ record_tdep->size_pid_t = 4;
++ record_tdep->size_winsize = 8;
++ record_tdep->size_serial_struct = 72;
++ record_tdep->size_serial_icounter_struct = 80;
++ record_tdep->size_size_t = 8;
++ record_tdep->size_iovec = 16;
++ record_tdep->size_time_t = 8;
++ }
++ else if (wordsize == 4)
++ {
++ record_tdep->size_pointer = 4;
++ record_tdep->size__old_kernel_stat = 32;
++ record_tdep->size_tms = 16;
++ record_tdep->size_loff_t = 8;
++ record_tdep->size_flock = 16;
++ record_tdep->size_oldold_utsname = 45;
++ record_tdep->size_ustat = 20;
++ record_tdep->size_old_sigaction = 16;
++ record_tdep->size_old_sigset_t = 4;
++ record_tdep->size_rlimit = 8;
++ record_tdep->size_rusage = 72;
++ record_tdep->size_timeval = 8;
++ record_tdep->size_timezone = 8;
++ record_tdep->size_old_gid_t = 4;
++ record_tdep->size_old_uid_t = 4;
++ record_tdep->size_fd_set = 128;
++ record_tdep->size_old_dirent = 268;
++ record_tdep->size_statfs = 64;
++ record_tdep->size_statfs64 = 88;
++ record_tdep->size_sockaddr = 16;
++ record_tdep->size_int = 4;
++ record_tdep->size_long = 4;
++ record_tdep->size_ulong = 4;
++ record_tdep->size_msghdr = 28;
++ record_tdep->size_itimerval = 16;
++ record_tdep->size_stat = 88;
++ record_tdep->size_old_utsname = 325;
++ record_tdep->size_sysinfo = 64;
++ record_tdep->size_msqid_ds = 68;
++ record_tdep->size_shmid_ds = 60;
++ record_tdep->size_new_utsname = 390;
++ record_tdep->size_timex = 128;
++ record_tdep->size_mem_dqinfo = 24;
++ record_tdep->size_if_dqblk = 72;
++ record_tdep->size_fs_quota_stat = 80;
++ record_tdep->size_timespec = 8;
++ record_tdep->size_pollfd = 8;
++ record_tdep->size_NFS_FHSIZE = 32;
++ record_tdep->size_knfsd_fh = 132;
++ record_tdep->size_TASK_COMM_LEN = 16;
++ record_tdep->size_sigaction = 20;
++ record_tdep->size_sigset_t = 8;
++ record_tdep->size_siginfo_t = 128;
++ record_tdep->size_cap_user_data_t = 4;
++ record_tdep->size_stack_t = 12;
++ record_tdep->size_off_t = 4;
++ record_tdep->size_stat64 = 104;
++ record_tdep->size_gid_t = 4;
++ record_tdep->size_uid_t = 4;
++ record_tdep->size_PAGE_SIZE = 0x10000; /* 64KB */
++ record_tdep->size_flock64 = 32;
++ record_tdep->size_io_event = 32;
++ record_tdep->size_iocb = 64;
++ record_tdep->size_epoll_event = 16;
++ record_tdep->size_itimerspec = 16;
++ record_tdep->size_mq_attr = 32;
++ record_tdep->size_termios = 44;
++ record_tdep->size_pid_t = 4;
++ record_tdep->size_winsize = 8;
++ record_tdep->size_serial_struct = 60;
++ record_tdep->size_serial_icounter_struct = 80;
++ record_tdep->size_size_t = 4;
++ record_tdep->size_iovec = 8;
++ record_tdep->size_time_t = 4;
++ }
++ else
++ internal_error (_("unexpected wordsize"));
++
++ /* These values are the second argument of system call "sys_fcntl"
++ and "sys_fcntl64". They are obtained from Linux Kernel source. */
++ record_tdep->fcntl_F_GETLK = 5;
++ record_tdep->fcntl_F_GETLK64 = 12;
++ record_tdep->fcntl_F_SETLK64 = 13;
++ record_tdep->fcntl_F_SETLKW64 = 14;
++
++ record_tdep->arg1 = SW64_A0_REGNUM + 0;
++ record_tdep->arg2 = SW64_A0_REGNUM + 1;
++ record_tdep->arg3 = SW64_A0_REGNUM + 2;
++ record_tdep->arg4 = SW64_A0_REGNUM + 3;
++ record_tdep->arg5 = SW64_A0_REGNUM + 4;
++ record_tdep->arg6 = SW64_A0_REGNUM + 5;
++
++ /* These values are the second argument of system call "sys_ioctl".
++ They are obtained from Linux Kernel source.
++ See arch/sw64/include/uapi/asm/ioctls.h. */
++ record_tdep->ioctl_TCGETS = 0x403c7413;//402c7413
++ record_tdep->ioctl_TCSETS = 0x802c7414;//802c7414
++ record_tdep->ioctl_TCSETSW = 0x802c7415;//802c7415
++ record_tdep->ioctl_TCSETSF = 0x802c7416;//802c7416
++ record_tdep->ioctl_TCGETA = 0x40127417;//40127417
++ record_tdep->ioctl_TCSETA = 0x80127418;//80127418
++ record_tdep->ioctl_TCSETAW = 0x80127419;//80127419
++ record_tdep->ioctl_TCSETAF = 0x8012741c;//8012741c
++ record_tdep->ioctl_TCSBRK = 0x2000741d;//2000741d
++ record_tdep->ioctl_TCXONC = 0x2000741e;//2000741e
++ record_tdep->ioctl_TCFLSH = 0x2000741f;//2000741f
++ record_tdep->ioctl_TIOCEXCL = 0x540c;//
++ record_tdep->ioctl_TIOCNXCL = 0x540d;//
++ record_tdep->ioctl_TIOCSCTTY = 0x540e;//
++ record_tdep->ioctl_TIOCGPGRP = 0x40047477;//40047477
++ record_tdep->ioctl_TIOCSPGRP = 0x80047476;//80047476
++ record_tdep->ioctl_TIOCOUTQ = 0x40047473;//40047473
++ record_tdep->ioctl_TIOCSTI = 0x5412;//5412
++ record_tdep->ioctl_TIOCGWINSZ = 0x40087468;//40087468
++ record_tdep->ioctl_TIOCSWINSZ = 0x80087467;//80087467
++ record_tdep->ioctl_TIOCMGET = 0x5415;//
++ record_tdep->ioctl_TIOCMBIS = 0x5416;//
++ record_tdep->ioctl_TIOCMBIC = 0x5417;//
++ record_tdep->ioctl_TIOCMSET = 0x5418;//
++ record_tdep->ioctl_TIOCGSOFTCAR = 0x5419;//
++ record_tdep->ioctl_TIOCSSOFTCAR = 0x541a;//
++ record_tdep->ioctl_FIONREAD = 0x4004667f;//4004667f
++ record_tdep->ioctl_TIOCINQ = 0x4004667f;//4004667f
++ record_tdep->ioctl_TIOCLINUX = 0x541c;//
++ record_tdep->ioctl_TIOCCONS = 0x541d;//
++ record_tdep->ioctl_TIOCGSERIAL = 0x541e;//
++ record_tdep->ioctl_TIOCSSERIAL = 0x541f;//
++ record_tdep->ioctl_TIOCPKT = 0x5420;//
++ record_tdep->ioctl_FIONBIO = 0x8004667e;//8004667e
++ record_tdep->ioctl_TIOCNOTTY = 0x5422;//
++ record_tdep->ioctl_TIOCSETD = 0x5423;//
++ record_tdep->ioctl_TIOCGETD = 0x5424;//
++ record_tdep->ioctl_TCSBRKP = 0x5425;//
++ record_tdep->ioctl_TIOCSBRK = 0x5427;
++ record_tdep->ioctl_TIOCCBRK = 0x5428;
++ record_tdep->ioctl_TIOCGSID = 0x5429;
++ record_tdep->ioctl_TIOCGPTN = 0x40045430;//40045430
++ record_tdep->ioctl_TIOCSPTLCK = 0x80045431;//80045431
++ record_tdep->ioctl_FIONCLEX = 0x20006602;//20006602
++ record_tdep->ioctl_FIOCLEX = 0x20006601;//20006601
++ record_tdep->ioctl_FIOASYNC = 0x8004667d;//8004667d
++ record_tdep->ioctl_TIOCSERCONFIG = 0x5453;//
++ record_tdep->ioctl_TIOCSERGWILD = 0x5454;
++ record_tdep->ioctl_TIOCSERSWILD = 0x5455;
++ record_tdep->ioctl_TIOCGLCKTRMIOS = 0x5456;
++ record_tdep->ioctl_TIOCSLCKTRMIOS = 0x5457;
++ record_tdep->ioctl_TIOCSERGSTRUCT = 0x5458;
++ record_tdep->ioctl_TIOCSERGETLSR = 0x5459;
++ record_tdep->ioctl_TIOCSERGETMULTI = 0x545a;
++ record_tdep->ioctl_TIOCSERSETMULTI = 0x545b;
++ record_tdep->ioctl_TIOCMIWAIT = 0x545c;
++ record_tdep->ioctl_TIOCGICOUNT = 0x545d;
++ record_tdep->ioctl_FIOQSIZE = 0x40086680;//
++}
++
++/* sw64 process record-replay constructs: syscall, signal etc. */
++
++struct linux_record_tdep sw64_linux_record_tdep;
++
++/* Enum that defines the sw64 linux specific syscall identifiers used for
++ process record/replay. */
++enum sw64_syscall {
++
++sw64_sys_exit =1,
++sw64_sys_fork =2,
++sw64_sys_read =3,
++sw64_sys_write =4,
++sw64_sys_close =6,
++//sw64_sys_wait4 =7,
++sw64_sys_link =9,
++sw64_sys_unlink =10,
++sw64_sys_chdir =12,
++sw64_sys_fchdir =13,
++sw64_sys_mknod =14,
++sw64_sys_chmod =15,
++sw64_sys_chown =16,
++sw64_sys_brk =17,
++sw64_sys_lseek =19,
++sw64_sys_getxpid =20,//getxpid
++sw64_sys_mount =21,
++sw64_sys_umount2 =22,
++sw64_sys_setuid =23,
++sw64_sys_getxuid =24,//getxuid
++sw64_sys_ptrace =26,
++sw64_sys_access =33,
++sw64_sys_sync =36,
++sw64_sys_kill =37,
++sw64_sys_setpgid =39,
++sw64_sys_dup =41,
++sw64_sys_pipe =42,
++sw64_sys_set_program_attributes =43,
++sw64_sys_open =45,
++sw64_sys_getxgid =47,//getxgid
++sw64_sys_sigprocmask =48,
++sw64_sys_acct =51,
++sw64_sys_sigpending =52,
++sw64_sys_ioctl =54,
++sw64_sys_symlink =57,
++sw64_sys_readlink =58,
++sw64_sys_execve =59,
++sw64_sys_umask =60,
++sw64_sys_chroot =61,
++sw64_sys_getpgrp =63,
++sw64_sys_getpagesize =64,
++sw64_sys_vfork =66,
++sw64_sys_stat =67,
++sw64_sys_lstat =68,
++sw64_sys_mmap =71,
++sw64_sys_munmap =73,
++sw64_sys_mprotect =74,
++sw64_sys_madvise =75,
++sw64_sys_vhangup =76,
++sw64_sys_getgroups =79,
++sw64_sys_setgroups =80,
++sw64_sys_setpgrp =82,
++sw64_sys_setitimer =83,
++sw64_sys_getitimer =86,
++sw64_sys_gethostname =87,
++sw64_sys_sethostname =88,
++sw64_sys_getdtablesize =89,
++sw64_sys_dup2 =90,
++sw64_sys_fstat =91,
++sw64_sys_fcntl =92,
++sw64_sys_select =93,
++sw64_sys_poll =94,
++sw64_sys_fsync =95,
++sw64_sys_setpriority =96,
++sw64_sys_socket =97,
++sw64_sys_connect =98,
++sw64_sys_accept =99,
++sw64_sys_getpriority =100,
++sw64_sys_send =101,
++sw64_sys_recv =102,
++sw64_sys_sigreturn =103,
++sw64_sys_bind =104,
++sw64_sys_setsockopt =105,
++sw64_sys_listen =106,
++sw64_sys_sigsuspend =111,
++sw64_sysi_sigstack =112,
++sw64_sys_recvmsg =113,
++sw64_sys_sendmsg =114,
++sw64_sys_gettimeofday =116,
++//sw64_sys_getrusage =117,
++sw64_sys_osf_getrusage =117,
++sw64_sys_getsockopt =118,
++sw64_sys_socketcall =119,
++sw64_sys_readv =120,
++sw64_sys_writev =121,
++sw64_sys_settimeofday =122,
++sw64_sys_fchown =123,
++sw64_sys_fchmod =124,
++sw64_sys_recvfrom =125,
++sw64_sys_setreuid =126,
++sw64_sys_setregid =127,
++sw64_sys_rename =128,
++sw64_sys_truncate =129,
++sw64_sys_ftruncate =130,
++sw64_sys_flock =131,
++sw64_sys_setgid =132,
++sw64_sys_sendto =133,
++sw64_sys_shutdown =134,
++sw64_sys_socketpair =135,
++sw64_sys_mkdir =136,
++sw64_sys_rmdir =137,
++sw64_sys_utimes =138,
++sw64_sys_getpeername =141,
++sw64_sys_getrlimit =144,
++sw64_sys_setrlimit =145,
++sw64_sys_setsid =147,
++sw64_sys_quotactl =148,
++sw64_sys_getsockname =150,
++sw64_sys_sigaction =156,
++sw64_sys_getdirentries =159,
++sw64_sys_statfs =160,
++sw64_sys_fstatfs =161,
++sw64_sys_getdomainname =165,
++sw64_sys_setdomainname =166,
++sw64_sys_bpf =170,
++sw64_sys_userfaultfd =171,
++sw64_sys_membarrier =172,
++sw64_sys_mlock2 =173,
++sw64_sys_getpid =174,//REPETITION
++sw64_sys_getppid =175,
++sw64_sys_getuid =176,//REPETITION
++sw64_sys_geteuid =177,
++sw64_sys_getgid =178,//REPETITION
++sw64_sys_getegid =179,
++sw64_sys_swapon =199,
++sw64_sys_msgctl =200,
++sw64_sys_msgget =201,
++sw64_sys_msgrcv =202,
++sw64_sys_msgsnd =203,
++sw64_sys_semctl =204,
++sw64_sys_semget =205,
++sw64_sys_semop =206,
++sw64_sys_utsname =207,
++sw64_sys_lchown =208,
++sw64_sys_shmat =209,
++sw64_sys_shmctl =210,
++sw64_sys_shmdt =211,
++sw64_sys_shmget =212,
++sw64_sys_msync =217,
++sw64_sysi_stat =224,
++sw64_sys_statfs64 =227,
++sw64_sys_fstatfs64 =230,
++sw64_sys_getpgid =233,
++sw64_sys_getsid =234,
++sw64_sys_sigaltstack =235,
++//sw64_sys_osf_sysinfo =241,//
++sw64_sys_proplist_syscall =244,
++sw64_sys_usleep_thread =251,
++sw64_sys_sysfs =254,
++sw64_sys_getsysinfo =256,
++sw64_sys_setsysinfo =257,
++sw64_sys_bdflush =300,
++sw64_sys_sethae =301,
++sw64_sys_old_adjtimex =303,
++sw64_sys_swapoff =304,
++sw64_sys_getdents =305,
++sw64_sys_create_module =306,
++sw64_sys_init_module =307,
++sw64_sys_delete_module =308,
++sw64_sys_get_kernel_syms =309,
++sw64_sys_syslog =310,
++sw64_sys_reboot =311,
++sw64_sys_clone =312,
++sw64_sys_uselib =313,
++sw64_sys_mlock =314,
++sw64_sys_munlock =315,
++sw64_sys_mlockall =316,
++sw64_sys_munlockall =317,
++sw64_sys_sysinfo =318,
++sw64_sys_sysctl =319,
++sw64_sys_oldumount =321,
++sw64_sys_times =323,
++sw64_sys_personality =324,
++sw64_sys_setfsuid =325,
++sw64_sys_setfsgid =326,
++sw64_sys_ustat =327,
++sw64_sys_sched_setparam =330,
++sw64_sys_sched_getparam =331,
++sw64_sys_sched_setscheduler =332,
++sw64_sys_sched_getscheduler =333,
++sw64_sys_sched_yield =334,
++sw64_sys_sched_get_priority_max =335,
++sw64_sys_sched_get_priority_min =336,
++sw64_sys_sched_rr_get_interval =337,
++sw64_sys_afs_syscall =338,
++sw64_sys_uname =339,
++sw64_sys_nanosleep =340,
++sw64_sys_mremap =341,
++sw64_sys_nfsservctl =342,
++sw64_sys_setresuid =343,
++sw64_sys_getresuid =344,
++sw64_sys_pciconfig_read =345,
++sw64_sys_pciconfig_write =346,
++sw64_sys_query_module =347,
++sw64_sys_prctl =348,
++sw64_sys_pread64 =349,
++sw64_sys_pwrite64 =350,
++sw64_sys_rt_sigreturn =351,
++sw64_sys_rt_sigaction =352,
++sw64_sys_rt_sigprocmask =353,
++sw64_sys_rt_sigpending =354,
++sw64_sys_rt_sigtimedwait =355,
++sw64_sys_rt_sigqueueinfo =356,
++sw64_sys_rt_sigsuspend =357,
++sw64_sys_getrusage =364,
++sw64_sys_wait4 =365,
++sw64_sys_adjtimex =366,
++sw64_sys_getcwd =367,
++sw64_sys_capget =368,
++sw64_sys_capset =369,
++sw64_sys_sendfile =370,
++sw64_sys_setresgid =371,
++sw64_sys_getresgid =372,
++sw64_sys_dipc =373,
++sw64_sys_pivot_root =374,
++sw64_sys_mincore =375,
++sw64_sys_pciconfig_iobase =376,
++sw64_sys_getdents64 =377,
++sw64_sys_gettid =378,
++sw64_sys_readahead =379,
++sw64_sys_tkill =381,
++sw64_sys_setxattr =382,
++sw64_sys_lsetxattr =383,
++sw64_sys_fsetxattr =384,
++sw64_sys_getxattr =385,
++sw64_sys_lgetxattr =386,
++sw64_sys_fgetxattr =387,
++sw64_sys_listxattr =388,
++sw64_sys_llistxattr =389,
++sw64_sys_flistxattr =390,
++sw64_sys_removexattr =391,
++sw64_sys_lremovexattr =392,
++sw64_sys_fremovexattr =393,
++sw64_sys_futex =394,
++sw64_sys_sched_setaffinity =395,
++sw64_sys_sched_getaffinity =396,
++sw64_sys_tuxcall =397,
++sw64_sys_io_setup =398,
++sw64_sys_io_destroy =399,
++sw64_sys_io_getevents =400,
++sw64_sys_io_submit =401,
++sw64_sys_io_cancel =402,
++sw64_sys_io_pgetevents =403,
++sw64_sys_rseq =404,
++sw64_sys_exit_group =405,
++sw64_sys_lookup_dcookie =406,
++sw64_sys_epoll_create =407,
++sw64_sys_epoll_ctl =408,
++sw64_sys_epoll_wait =409,
++sw64_sys_remap_file_pages =410,
++sw64_sys_set_tid_address =411,
++sw64_sys_restart_syscall =412,
++sw64_sys_fadvise64 =413,
++sw64_sys_timer_create =414,
++sw64_sys_timer_settime =415,
++sw64_sys_timer_gettime =416,
++sw64_sys_timer_getoverrun =417,
++sw64_sys_timer_delete =418,
++sw64_sys_clock_settime =419,
++sw64_sys_clock_gettime =420,
++sw64_sys_clock_getres =421,
++sw64_sys_clock_nanosleep =422,
++sw64_sys_semtimedop =423,
++sw64_sys_tgkill =424,
++sw64_sys_stat64 =425,
++sw64_sys_lstat64 =426,
++sw64_sys_fstat64 =427,
++sw64_sys_vserver =428,
++sw64_sys_mbind =429,
++sw64_sys_get_mempolicy =430,
++sw64_sys_set_mempolicy =431,
++sw64_sys_mq_open =432,
++sw64_sys_mq_unlink =433,
++sw64_sys_mq_timedsend =434,
++sw64_sys_mq_timedreceive =435,
++sw64_sys_mq_notify =436,
++sw64_sys_mq_getsetattr =437,
++sw64_sys_waitid =438,
++sw64_sys_add_key =439,
++sw64_sys_request_key =440,
++sw64_sys_keyctl =441,
++sw64_sys_ioprio_set =442,
++sw64_sys_ioprio_get =443,
++sw64_sys_inotify_init =444,
++sw64_sys_inotify_add_watch =445,
++sw64_sys_inotify_rm_watch =446,
++sw64_sys_fdatasync =447,
++sw64_sys_kexec_load =448,
++sw64_sys_migrate_pages =449,
++sw64_sys_openat =450,
++sw64_sys_mkdirat =451,
++sw64_sys_mknodat =452,
++sw64_sys_fchownat =453,
++sw64_sys_futimesat =454,
++sw64_sys_fstatat64 =455,
++sw64_sys_unlinkat =456,
++sw64_sys_renameat =457,
++sw64_sys_linkat =458,
++sw64_sys_symlinkat =459,
++sw64_sys_readlinkat =460,
++sw64_sys_fchmodat =461,
++sw64_sys_faccessat =462,
++sw64_sys_pselect6 =463,
++sw64_sys_ppoll =464,
++sw64_sys_unshare =465,
++sw64_sys_set_robust_list =466,
++sw64_sys_get_robust_list =467,
++sw64_sys_splice =468,
++sw64_sys_sync_file_range =469,
++sw64_sys_tee =470,
++sw64_sys_vmsplice =471,
++sw64_sys_move_pages =472,
++sw64_sys_getcpu =473,
++sw64_sys_epoll_pwait =474,
++sw64_sys_utimensat =475,
++sw64_sys_signalfd =476,
++sw64_sys_timerfd =477,
++sw64_sys_eventfd =478,
++sw64_sys_recvmmsg =479,
++sw64_sys_fallocate =480,
++sw64_sys_timerfd_create =481,
++sw64_sys_timerfd_settime =482,
++sw64_sys_timerfd_gettime =483,
++sw64_sys_signalfd4 =484,
++sw64_sys_eventfd2 =485,
++sw64_sys_epoll_create1 =486,
++sw64_sys_dup3 =487,
++sw64_sys_pipe2 =488,
++sw64_sys_inotify_init1 =489,
++sw64_sys_preadv =490,
++sw64_sys_pwritev =491,
++sw64_sys_rt_tgsigqueueinfo =492,
++sw64_sys_perf_event_open =493,
++sw64_sys_fanotify_init =494,
++sw64_sys_fanotify_mark =495,
++sw64_sys_prlimit64 =496,
++sw64_sys_name_to_handle_at =497,
++sw64_sys_open_by_handle_at =498,
++sw64_sys_clock_adjtime =499,
++sw64_sys_syncfs =500,
++sw64_sys_setns =501,
++sw64_sys_accept4 =502,
++sw64_sys_sendmmsg =503,
++sw64_sys_process_vm_readv =504,
++sw64_sys_process_vm_writev =505,
++sw64_sys_kcmp =506,
++sw64_sys_finit_module =507,
++sw64_sys_sched_setattr =508,
++sw64_sys_sched_getattr =509,
++sw64_sys_renameat2 =510,
++sw64_sys_getrandom =511,
++sw64_sys_memfd_create =512,
++sw64_sys_execveat =513,
++sw64_sys_seccomp =514,
++sw64_sys_copy_file_range =515,
++sw64_sys_preadv2 =516,
++sw64_sys_pwritev2 =517,
++sw64_sys_statx =518,
++};
++
++/* sw64_canonicalize_syscall maps syscall ids from the native SW64
++ linux set of syscall ids into a canonical set of syscall ids used by
++ process record. */
++
++static enum gdb_syscall
++sw64_canonicalize_syscall (enum sw64_syscall syscall_number)
++{
++#define SYSCALL_MAP(SYSCALL) case sw64_sys_##SYSCALL: \
++ return gdb_sys_##SYSCALL
++
++#define UNSUPPORTED_SYSCALL_MAP(SYSCALL) case sw64_sys_##SYSCALL: \
++ return gdb_sys_no_syscall
++
++ switch (syscall_number)
++ {
++ //sw syscall
++ //UNSUPPORTED_SYSCALL_MAP(SYSCALL) (execveat);
++ //UNSUPPORTED_SYSCALL_MAP (userfaultfd);
++ //UNSUPPORTED_SYSCALL_MAP (mlock2);
++ //UNSUPPORTED_SYSCALL_MAP (copy_file_range);
++ //UNSUPPORTED_SYSCALL_MAP (preadv2);
++ //UNSUPPORTED_SYSCALL_MAP (pwritev2);
++ //UNSUPPORTED_SYSCALL_MAP (renameat2);
++ //UNSUPPORTED_SYSCALL_MAP (seccomp);
++ //UNSUPPORTED_SYSCALL_MAP (getrandom);
++ //UNSUPPORTED_SYSCALL_MAP (memfd_create);
++ //UNSUPPORTED_SYSCALL_MAP (bpf);
++ //UNSUPPORTED_SYSCALL_MAP(syscalls);
++
++ SYSCALL_MAP (open);
++ SYSCALL_MAP (fstatat64);
++ SYSCALL_MAP (io_setup);
++ SYSCALL_MAP (io_destroy);
++ SYSCALL_MAP (io_submit);
++ SYSCALL_MAP (io_cancel);
++ SYSCALL_MAP (io_getevents);
++
++ SYSCALL_MAP (setxattr);
++ SYSCALL_MAP (lsetxattr);
++ SYSCALL_MAP (fsetxattr);
++ SYSCALL_MAP (getxattr);
++ SYSCALL_MAP (lgetxattr);
++ SYSCALL_MAP (fgetxattr);
++ SYSCALL_MAP (listxattr);
++ SYSCALL_MAP (llistxattr);
++ SYSCALL_MAP (flistxattr);
++ SYSCALL_MAP (removexattr);
++ SYSCALL_MAP (lremovexattr);
++ SYSCALL_MAP (fremovexattr);
++ SYSCALL_MAP (getcwd);
++ SYSCALL_MAP (lookup_dcookie);
++ SYSCALL_MAP (eventfd2);
++ SYSCALL_MAP (epoll_create1);
++ SYSCALL_MAP (epoll_ctl);
++ SYSCALL_MAP (epoll_pwait);
++ SYSCALL_MAP (dup);
++ SYSCALL_MAP (dup3);
++ SYSCALL_MAP (fcntl);
++ SYSCALL_MAP (inotify_init1);
++ SYSCALL_MAP (inotify_add_watch);
++ SYSCALL_MAP (inotify_rm_watch);
++ SYSCALL_MAP (ioctl);
++ SYSCALL_MAP (ioprio_set);
++ SYSCALL_MAP (ioprio_get);
++ SYSCALL_MAP (flock);
++ SYSCALL_MAP (mknodat);
++ SYSCALL_MAP (mkdirat);
++ SYSCALL_MAP (unlinkat);
++ SYSCALL_MAP (symlinkat);
++ SYSCALL_MAP (linkat);
++ SYSCALL_MAP (renameat);
++ UNSUPPORTED_SYSCALL_MAP (umount2);
++ SYSCALL_MAP (mount);
++ SYSCALL_MAP (pivot_root);
++ SYSCALL_MAP (nfsservctl);
++ SYSCALL_MAP (statfs);
++ SYSCALL_MAP (truncate);
++ SYSCALL_MAP (ftruncate);
++ SYSCALL_MAP (fallocate);
++ SYSCALL_MAP (faccessat);
++ SYSCALL_MAP (fchdir);
++ SYSCALL_MAP (chroot);
++ SYSCALL_MAP (fchmod);
++ SYSCALL_MAP (fchmodat);
++ SYSCALL_MAP (fchownat);
++ SYSCALL_MAP (fchown);
++ SYSCALL_MAP (openat);
++ SYSCALL_MAP (close);
++ SYSCALL_MAP (vhangup);
++ SYSCALL_MAP (pipe);
++ SYSCALL_MAP (pipe2);
++ SYSCALL_MAP (quotactl);
++ SYSCALL_MAP (getdents64);
++ SYSCALL_MAP (lseek);
++ SYSCALL_MAP (read);
++ SYSCALL_MAP (write);
++ SYSCALL_MAP (readv);
++ SYSCALL_MAP (writev);
++ SYSCALL_MAP (pread64);
++ SYSCALL_MAP (pwrite64);
++ UNSUPPORTED_SYSCALL_MAP (preadv);
++ UNSUPPORTED_SYSCALL_MAP (pwritev);
++ SYSCALL_MAP (sendfile);
++ SYSCALL_MAP (pselect6);
++ SYSCALL_MAP (ppoll);
++ UNSUPPORTED_SYSCALL_MAP (signalfd4);
++ SYSCALL_MAP (vmsplice);
++ SYSCALL_MAP (splice);
++ SYSCALL_MAP (tee);
++ SYSCALL_MAP (readlinkat);
++ //SYSCALL_MAP (newfstatat);
++
++ SYSCALL_MAP (fstat);
++ SYSCALL_MAP (sync);
++ SYSCALL_MAP (fsync);
++ SYSCALL_MAP (fdatasync);
++ SYSCALL_MAP (sync_file_range);
++ UNSUPPORTED_SYSCALL_MAP (timerfd_create);
++ UNSUPPORTED_SYSCALL_MAP (timerfd_settime);
++ UNSUPPORTED_SYSCALL_MAP (timerfd_gettime);
++ UNSUPPORTED_SYSCALL_MAP (utimensat);
++ SYSCALL_MAP (acct);
++ SYSCALL_MAP (capget);
++ SYSCALL_MAP (capset);
++ SYSCALL_MAP (personality);
++ SYSCALL_MAP (exit);
++ SYSCALL_MAP (exit_group);
++ SYSCALL_MAP (waitid);
++ SYSCALL_MAP (set_tid_address);
++ SYSCALL_MAP (unshare);
++ SYSCALL_MAP (futex);
++ SYSCALL_MAP (set_robust_list);
++ SYSCALL_MAP (get_robust_list);
++ SYSCALL_MAP (nanosleep);
++
++ SYSCALL_MAP (getitimer);
++ SYSCALL_MAP (setitimer);
++ SYSCALL_MAP (kexec_load);
++ SYSCALL_MAP (init_module);
++ SYSCALL_MAP (delete_module);
++ SYSCALL_MAP (timer_create);
++ SYSCALL_MAP (timer_settime);
++ SYSCALL_MAP (timer_gettime);
++ SYSCALL_MAP (timer_getoverrun);
++ SYSCALL_MAP (timer_delete);
++ SYSCALL_MAP (clock_settime);
++ SYSCALL_MAP (clock_gettime);
++ SYSCALL_MAP (clock_getres);
++ SYSCALL_MAP (clock_nanosleep);
++ SYSCALL_MAP (syslog);
++ SYSCALL_MAP (ptrace);
++ SYSCALL_MAP (sched_setparam);
++ SYSCALL_MAP (sched_setscheduler);
++ SYSCALL_MAP (sched_getscheduler);
++ SYSCALL_MAP (sched_getparam);
++ SYSCALL_MAP (sched_setaffinity);
++ SYSCALL_MAP (sched_getaffinity);
++ SYSCALL_MAP (sched_yield);
++ SYSCALL_MAP (sched_get_priority_max);
++ SYSCALL_MAP (sched_get_priority_min);
++ SYSCALL_MAP (sched_rr_get_interval);
++ SYSCALL_MAP (kill);
++ SYSCALL_MAP (tkill);
++ SYSCALL_MAP (tgkill);
++ SYSCALL_MAP (sigaltstack);
++ SYSCALL_MAP (rt_sigsuspend);
++ SYSCALL_MAP (rt_sigaction);
++ SYSCALL_MAP (sigprocmask);
++ SYSCALL_MAP (rt_sigprocmask);
++ SYSCALL_MAP (rt_sigpending);
++ SYSCALL_MAP (rt_sigtimedwait);
++ SYSCALL_MAP (rt_sigqueueinfo);
++ SYSCALL_MAP (sigreturn);
++ SYSCALL_MAP (rt_sigreturn);
++ SYSCALL_MAP (setpriority);
++ SYSCALL_MAP (getpriority);
++ SYSCALL_MAP (reboot);
++ SYSCALL_MAP (setregid);
++ SYSCALL_MAP (setgid);
++ SYSCALL_MAP (setreuid);
++ SYSCALL_MAP (setuid);
++ SYSCALL_MAP (setresuid);
++ SYSCALL_MAP (getresuid);
++ SYSCALL_MAP (setresgid);
++ SYSCALL_MAP (getresgid);
++ SYSCALL_MAP (setfsuid);
++ SYSCALL_MAP (setfsgid);
++ SYSCALL_MAP (times);
++ SYSCALL_MAP (setpgid);
++ SYSCALL_MAP (getpgid);
++ SYSCALL_MAP (getsid);
++ SYSCALL_MAP (setsid);
++ SYSCALL_MAP (getgroups);
++ SYSCALL_MAP (setgroups);
++ SYSCALL_MAP (uname);
++ SYSCALL_MAP (sethostname);
++ SYSCALL_MAP (setdomainname);
++ SYSCALL_MAP (getrlimit);
++ SYSCALL_MAP (setrlimit);
++ SYSCALL_MAP (getrusage);
++ SYSCALL_MAP (umask);
++ SYSCALL_MAP (prctl);
++ SYSCALL_MAP (getcpu);
++ SYSCALL_MAP (gettimeofday);
++ SYSCALL_MAP (settimeofday);
++ SYSCALL_MAP (adjtimex);
++ SYSCALL_MAP (getpid);
++ SYSCALL_MAP (getppid);
++ SYSCALL_MAP (getuid);
++ SYSCALL_MAP (geteuid);
++ SYSCALL_MAP (getgid);
++ SYSCALL_MAP (getegid);
++ SYSCALL_MAP (gettid);
++ SYSCALL_MAP (sysinfo);
++ SYSCALL_MAP (mq_open);
++ SYSCALL_MAP (mq_unlink);
++ SYSCALL_MAP (mq_timedsend);
++ SYSCALL_MAP (mq_timedreceive);
++ SYSCALL_MAP (mq_notify);
++ SYSCALL_MAP (mq_getsetattr);
++ SYSCALL_MAP (msgget);
++ SYSCALL_MAP (msgctl);
++ SYSCALL_MAP (msgrcv);
++ SYSCALL_MAP (msgsnd);
++ SYSCALL_MAP (semget);
++ SYSCALL_MAP (semctl);
++ SYSCALL_MAP (semtimedop);
++ SYSCALL_MAP (semop);
++ SYSCALL_MAP (shmget);
++ SYSCALL_MAP (shmctl);
++ SYSCALL_MAP (shmat);
++ SYSCALL_MAP (shmdt);
++ SYSCALL_MAP (socket);
++ SYSCALL_MAP (socketpair);
++ SYSCALL_MAP (bind);
++ SYSCALL_MAP (listen);
++ SYSCALL_MAP (accept);
++ SYSCALL_MAP (connect);
++ SYSCALL_MAP (getsockname);
++ SYSCALL_MAP (getpeername);
++ SYSCALL_MAP (sendto);
++ SYSCALL_MAP (recvfrom);
++ SYSCALL_MAP (setsockopt);
++ SYSCALL_MAP (getsockopt);
++ SYSCALL_MAP (shutdown);
++ SYSCALL_MAP (sendmsg);
++ SYSCALL_MAP (recvmsg);
++ SYSCALL_MAP (readahead);
++ SYSCALL_MAP (brk);
++ SYSCALL_MAP (munmap);
++ SYSCALL_MAP (mremap);
++ SYSCALL_MAP (add_key);
++ SYSCALL_MAP (request_key);
++ SYSCALL_MAP (keyctl);
++ SYSCALL_MAP (clone);
++ SYSCALL_MAP (execve);
++
++ case sw64_sys_mmap:
++ return gdb_sys_mmap2;
++
++ SYSCALL_MAP (fadvise64);
++ SYSCALL_MAP (swapon);
++ SYSCALL_MAP (swapoff);
++ SYSCALL_MAP (mprotect);
++ SYSCALL_MAP (msync);
++ SYSCALL_MAP (mlock);
++ SYSCALL_MAP (munlock);
++ SYSCALL_MAP (mlockall);
++ SYSCALL_MAP (munlockall);
++ SYSCALL_MAP (mincore);
++ SYSCALL_MAP (madvise);
++ SYSCALL_MAP (remap_file_pages);
++ SYSCALL_MAP (mbind);
++ SYSCALL_MAP (get_mempolicy);
++ SYSCALL_MAP (set_mempolicy);
++ SYSCALL_MAP (migrate_pages);
++ SYSCALL_MAP (move_pages);
++ UNSUPPORTED_SYSCALL_MAP (rt_tgsigqueueinfo);
++ UNSUPPORTED_SYSCALL_MAP (perf_event_open);
++ UNSUPPORTED_SYSCALL_MAP (accept4);
++ UNSUPPORTED_SYSCALL_MAP (recvmmsg);
++
++ SYSCALL_MAP (wait4);
++
++ UNSUPPORTED_SYSCALL_MAP (prlimit64);
++ UNSUPPORTED_SYSCALL_MAP (fanotify_init);
++ UNSUPPORTED_SYSCALL_MAP (fanotify_mark);
++ UNSUPPORTED_SYSCALL_MAP (name_to_handle_at);
++ UNSUPPORTED_SYSCALL_MAP (open_by_handle_at);
++ UNSUPPORTED_SYSCALL_MAP (clock_adjtime);
++ UNSUPPORTED_SYSCALL_MAP (syncfs);
++ UNSUPPORTED_SYSCALL_MAP (setns);
++ UNSUPPORTED_SYSCALL_MAP (sendmmsg);
++ UNSUPPORTED_SYSCALL_MAP (process_vm_readv);
++ UNSUPPORTED_SYSCALL_MAP (process_vm_writev);
++ UNSUPPORTED_SYSCALL_MAP (kcmp);
++ UNSUPPORTED_SYSCALL_MAP (finit_module);
++ UNSUPPORTED_SYSCALL_MAP (sched_setattr);
++ UNSUPPORTED_SYSCALL_MAP (sched_getattr);
++ default:
++ return gdb_sys_no_syscall;
++ }
++}
++
++/* Record all registers but PC register for process-record. */
++
++static int
++sw64_all_but_pc_registers_record (struct regcache *regcache)
++{
++ int i;
++
++ for (i = SW64_A0_REGNUM; i < SW64_PC_REGNUM; i++)
++ if (record_full_arch_list_add_reg (regcache, i))
++ return -1;
++
++ if (record_full_arch_list_add_reg (regcache, SW64_CSR_REGNUM))
++ return -1;
++
++ return 0;
++}
++
++/* Handler for sw64 system call instruction recording. */
++
++static int
++sw64_linux_syscall_record (struct regcache *regcache)
++{
++ int ret = 0;
++ enum gdb_syscall syscall_gdb;
++ ULONGEST svc_number;
++ regcache_raw_read_unsigned (regcache, SW64_V0_REGNUM, &svc_number);//_ASM_ get regbuf
++ syscall_gdb =
++ sw64_canonicalize_syscall ((enum sw64_syscall) svc_number);
++
++ if (syscall_gdb < 0)
++ {
++ printf_unfiltered (_("Process record and replay target doesn't "
++ "support syscall number %s\n"),
++ plongest (svc_number));
++ return -1;
++ }
++
++ if (syscall_gdb == gdb_sys_sigreturn
++ || syscall_gdb == gdb_sys_rt_sigreturn)
++ {
++ if (sw64_all_but_pc_registers_record (regcache))
++ return -1;
++ return 0;
++ }
++
++ ret = record_linux_system_call (syscall_gdb, regcache, &sw64_linux_record_tdep);
++
++ if (ret != 0)
++ return ret;
++
++ /* Record the return value of the system call. */
++ if (record_full_arch_list_add_reg (regcache, SW64_V0_REGNUM))
++ return -1;
++
++ return 0;
++}
++#endif
++
++static void
++sw64_linux_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
++{
++ linux_init_abi (info, gdbarch, 0);
++
++ /* Hook into the DWARF CFI frame unwinder. */
++ sw64_dwarf2_init_abi (info, gdbarch);
++
++ /* Hook into the MDEBUG frame unwinder. */
++ //sw64_mdebug_init_abi (info, gdbarch);
++
++ sw64_gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
++ tdep->dynamic_sigtramp_offset = sw64_linux_sigtramp_offset;
++ tdep->sigcontext_addr = sw64_linux_sigcontext_addr;
++ tdep->pc_in_sigtramp = sw64_linux_pc_in_sigtramp;
++ tdep->jb_pc = 2;
++ tdep->jb_elt_size = 8;
++
++ set_gdbarch_skip_trampoline_code (gdbarch, find_solib_trampoline_target);
++
++ set_solib_svr4_fetch_link_map_offsets
++ (gdbarch, linux_lp64_fetch_link_map_offsets);
++
++ /* Enable TLS support. */
++ set_gdbarch_fetch_tls_load_module_address (gdbarch,
++ svr4_fetch_objfile_link_map);
++
++ set_gdbarch_iterate_over_regset_sections
++ (gdbarch, sw64_linux_iterate_over_regset_sections);
++
++ set_gdbarch_gdb_signal_from_target (gdbarch,
++ sw64_linux_gdb_signal_from_target);
++ set_gdbarch_gdb_signal_to_target (gdbarch,
++ sw64_linux_gdb_signal_to_target);
++
++#ifndef LHX20240711_catch
++ /* Support catch syscall */
++ set_xml_syscall_file_name (gdbarch, "syscalls/sw64-linux.xml");
++ /* Get the syscall number from the arch's register. */
++ set_gdbarch_get_syscall_number (gdbarch, sw64_linux_get_syscall_number);
++#endif
++
++#ifndef LHX20240716_record
++ /* Reversible debugging, process record. */
++ set_gdbarch_process_record (gdbarch, sw64_process_record);
++ /* Syscall record. */
++ tdep->sw64_syscall_record = sw64_linux_syscall_record;
++ /* Displaced stepping. */
++ //set_gdbarch_displaced_step_location (gdbarch,
++ // linux_displaced_step_location);
++ sw64_init_linux_record_tdep (&sw64_linux_record_tdep, 8);
++#endif
++}
++
++void _initialize_sw64_linux_tdep ();
++void
++_initialize_sw64_linux_tdep ()
++{
++ gdbarch_register_osabi (bfd_arch_sw64, 0, GDB_OSABI_LINUX,
++ sw64_linux_init_abi);
++
++ /* Initialize the standard target descriptions. */
++ initialize_tdesc_sw64_linux ();
++}
+diff -Naur gdb-14.1-after-patch/gdb/sw64-linux-tdep.h gdb-14.1-sw64/gdb/sw64-linux-tdep.h
+--- gdb-14.1-after-patch/gdb/sw64-linux-tdep.h 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/sw64-linux-tdep.h 2025-03-03 10:59:13.210000000 +0800
+@@ -0,0 +1,45 @@
++/* GNU/Linux on SW64 target support, prototypes.
++
++ Copyright (C) 2012-2020 Free Software Foundation, Inc.
++ Contributed by ARM Ltd.
++
++ This file is part of GDB.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program. If not, see <http://www.gnu.org/licenses/>. */
++
++#ifndef SW64_LINUX_TDEP_H
++#define SW64_LINUX_TDEP_H
++
++#include "regset.h"
++
++/* The definitions of following two value come from sw64 kernel.
++ * linux-stable-sw/arch/sw_64/include/asm/elf.h:
++ * #define ELF_NGREG 33
++ * #define ELF_NFPREG 32
++ * It can also be found in header file /usr/include/sys/procfs.h
++ * */
++#define SW64_LINUX_SIZEOF_GREGSET (33 * SW64_REGISTER_SIZE)
++
++/* Refer to linux-stable-sw/arch/sw_64/kernel/process.c
++ * When fill in the fpu structure for a core dump,
++ * fp size in 4.19 is 32*8=256, but in 5.10 is 32*8*4=1024.
++ * kernel-4.19 (dump_elf_task_fp): memcpy(dest, sw->fp, 32 * 8);
++ * kernel-5.10 (dump_fpu): memcpy(fpu, &current->thread.fpstate, sizeof(*fpu));
++ */
++#define SW64_LINUX_SIZEOF_FPREGSET (32 * 4 * SW64_REGISTER_SIZE)
++
++/* Target descriptions. */
++extern const struct target_desc *tdesc_sw64_linux;
++
++#endif /* SW64_LINUX_TDEP_H */
diff --git a/gdb-14.1-add-support-for-SW64-002.patch b/gdb-14.1-add-support-for-SW64-002.patch
new file mode 100644
index 0000000000000000000000000000000000000000..24aedba51f84f23696fdd0f7dd975bd06c7018e7
--- /dev/null
+++ b/gdb-14.1-add-support-for-SW64-002.patch
@@ -0,0 +1,3845 @@
+diff -Naur gdb-14.1-after-patch/gdb/sw64-tdep.c gdb-14.1-sw64/gdb/sw64-tdep.c
+--- gdb-14.1-after-patch/gdb/sw64-tdep.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/sw64-tdep.c 2025-03-03 10:59:13.660000000 +0800
+@@ -0,0 +1,2721 @@
++/* Target-dependent code for the SW64 architecture, for GDB, the GNU Debugger.
++
++ Copyright (C) 1993-2023 Free Software Foundation, Inc.
++
++ This file is part of GDB.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program. If not, see <http://www.gnu.org/licenses/>. */
++
++#include "defs.h"
++#include "frame.h"
++#include "frame-unwind.h"
++#include "frame-base.h"
++#include "dwarf2/frame.h"
++#include "inferior.h"
++#include "symtab.h"
++#include "value.h"
++#include "gdbcmd.h"
++#include "gdbcore.h"
++#include "dis-asm.h"
++#include "symfile.h"
++#include "objfiles.h"
++#include "linespec.h"
++#include "regcache.h"
++#include "reggroups.h"
++#include "arch-utils.h"
++#include "osabi.h"
++#include "block.h"
++#include "infcall.h"
++#include "trad-frame.h"
++
++#include "elf-bfd.h"
++
++#include "sw64-tdep.h"
++#include
++
++#ifndef LHX20240716_record
++#include "record.h"
++#include "record-full.h"
++#endif
++
++#ifndef LHX20240716_record
++#define submask(x) ((1L << ((x) + 1)) - 1)
++#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
++#define bit(obj,st) (((obj) >> (st)) & 1)
++#define rigg(obj,st) ((obj) >> (st))
++#endif
++
++/* Instruction decoding. The notations for registers, immediates and
++ opcodes are the same as the one used in Compaq's SW64 architecture
++ handbook. */
++
++#define INSN_OPCODE(insn) ((insn & 0xfc000000) >> 26)
++#define INSN_FUNC(insn) ((insn & 0xf000) >> 12)
++
++/* Memory instruction format */
++#define MEM_RA(insn) ((insn & 0x03e00000) >> 21)
++#define MEM_RB(insn) ((insn & 0x001f0000) >> 16)
++#define MEM_DISP(insn) \
++ (((insn & 0x8000) == 0) ? (insn & 0xffff) : -((-insn) & 0xffff))
++
++static const int lda_opcode = 0x3e;
++static const int stq_opcode = 0x2b;
++
++/* Branch instruction format */
++#define BR_RA(insn) MEM_RA(insn)
++
++static const int br_opcode = 0x04;
++static const int bne_opcode = 0x31;
++
++/* Operate instruction format */
++#define OPR_FUNCTION(insn) ((insn & 0xfe0) >> 5)
++#define OPR_HAS_IMMEDIATE(insn) ((insn & 0x1000) == 0x1000)
++#define OPR_RA(insn) MEM_RA(insn)
++#define OPR_RC(insn) ((insn & 0x1f))
++#define OPR_LIT(insn) ((insn & 0x1fe000) >> 13)
++
++
++static const int subq_opcode = 0x10;
++static const int subq_function = 0x09;
++
++
++/* Return the name of the REGNO register.
++
++ An empty name corresponds to a register number that used to
++ be used for a virtual register. That virtual register has
++ been removed, but the index is still reserved to maintain
++ compatibility with existing remote sw64 targets. */
++
++static const char *
++sw64_register_name (struct gdbarch *gdbarch, int regno)
++{
++ static const char * const register_names[] =
++ {
++#ifndef LHX20240710
++ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
++ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "fp",
++ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
++ "r24", "r25", "ra", "r27", "r28", "r29", "sp", "r31",
++#else
++ "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
++ "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
++ "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
++ "t10", "t11", "ra", "t12", "at", "gp", "sp", "zero",
++#endif
++ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
++ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
++ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
++ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "fpcr",
++ "pc", "", "unique", /* num = 67 */
++
++ "ef0", "ef", "ef", "ef", "ef", "ef", "ef", "ef",
++ "ef1", "ef", "ef", "ef", "ef", "ef", "ef", "ef",
++ "ef2", "ef", "ef", "ef", "ef", "ef", "ef", "ef",
++ "ef3", "ef", "ef", "ef", "ef", "ef", "ef", "ef", /* ef0 */
++ "ef4", "ef", "ef", "ef", "ef", "ef", "ef", "ef",
++ "ef5", "ef", "ef", "ef", "ef", "ef", "ef", "ef",
++ "ef6", "ef", "ef", "ef", "ef", "ef", "ef", "ef",
++ "ef7", "ef", "ef", "ef", "ef", "ef", "ef", "ef", /* ef1 */
++ "ef8", "ef", "ef", "ef", "ef", "ef", "ef", "ef",
++ "ef9", "ef", "ef", "ef", "ef", "ef", "ef", "ef",
++ "ef10", "ef", "ef", "ef", "ef", "ef", "ef", "ef",
++ "ef11", "ef", "ef", "ef", "ef", "ef", "ef", "ef", /* ef2 */ /* num = 163 */
++
++ "", "", "", "", /*"DA_MATCH", "DA_MASK", "DV__MATCH", "DV_MASK",*/ /* num = 167 */
++
++ "V0", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
++ "V8", "V9", "V10", "V11", "V12", "V13", "V14", "V15",
++ "V16", "V17", "V18", "V19", "V20", "V21", "V22", "V23",
++ "V24", "V25", "V26", "V27", "V28", "V29", "V30", "V31" /* num = 199 */
++ };
++
++ gdb_static_assert (SW64_NUM_REGS + NVEC_REGS == ARRAY_SIZE (register_names));
++
++ if ((regno < 0) || (regno >= ARRAY_SIZE(register_names)))
++ return NULL;
++
++ return register_names[regno];
++}
++
++static int
++sw64_cannot_fetch_register (struct gdbarch *gdbarch, int regno)
++{
++ return (strlen (sw64_register_name (gdbarch, regno)) == 0);
++}
++
++static int
++sw64_cannot_store_register (struct gdbarch *gdbarch, int regno)
++{
++ return (regno == SW64_ZERO_REGNUM
++ || strlen (sw64_register_name (gdbarch, regno)) == 0);
++}
++
++#ifndef LHX20240710
++/* Construct vector type for ext registers. */
++static struct type *
++sw64_vec_type (struct gdbarch *gdbarch)
++{
++ sw64_gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
++
++ if (!tdep->sw64_vec_type)
++ {
++ struct type *t, *elem;
++
++ t = arch_composite_type(gdbarch, "__gdb_builtin_type_vec256", TYPE_CODE_UNION);
++
++ elem = builtin_type (gdbarch)->builtin_long_long;
++ append_composite_type_field (t, "v4_int64", init_vector_type (elem, 4));
++
++ elem = builtin_type (gdbarch)->builtin_double;
++ append_composite_type_field (t, "v4_double", init_vector_type (elem, 4));
++
++ t->set_is_vector (true);
++ t->set_name ("builtin_type_vec256");
++ tdep->sw64_vec_type = t;
++ }
++
++ return tdep->sw64_vec_type;
++}
++#endif
++
++static struct type *
++sw64_register_type (struct gdbarch *gdbarch, int regno)
++{
++ if (regno == SW64_SP_REGNUM || regno == SW64_GP_REGNUM)
++ return builtin_type (gdbarch)->builtin_data_ptr;
++ if (regno == SW64_PC_REGNUM)
++ return builtin_type (gdbarch)->builtin_func_ptr;
++
++ /* Don't need to worry about little vs big endian until
++ some jerk tries to port to sw64-unicosmk. */
++ if (regno >= SW64_FP0_REGNUM && regno < SW64_FP0_REGNUM + 31)
++ return builtin_type (gdbarch)->builtin_double;
++
++#ifndef LHX20240710
++ if (regno >= SW64_VEC0_REGNUM && regno < SW64_VEC0_REGNUM + 31 )
++ return sw64_vec_type(gdbarch);
++
++ if (regno >= 67 && regno < 163)
++ return builtin_type (gdbarch)->builtin_double;
++#endif
++
++ return builtin_type (gdbarch)->builtin_int64;
++}
++
++/* Is REGNUM a member of REGGROUP? */
++
++static int
++sw64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
++ const struct reggroup *group)
++{
++ /* Filter out any registers eliminated, but whose regnum is
++ reserved for backward compatibility, e.g. the vfp. */
++ if (*gdbarch_register_name (gdbarch, regnum) == '\0')
++ return 0;
++
++ if (group == all_reggroup)
++ return 1;
++
++ /* Zero should not be saved or restored. Technically it is a general
++ register (just as $f31 would be a float if we represented it), but
++ there's no point displaying it during "info regs", so leave it out
++ of all groups except for "all". */
++ if (regnum == SW64_ZERO_REGNUM)
++ return 0;
++
++ /* All other registers are saved and restored. */
++ if (group == save_reggroup || group == restore_reggroup)
++ return 1;
++
++ /* All other groups are non-overlapping. */
++
++ /* Since this is really a PALcode memory slot... */
++ if (regnum == SW64_UNIQUE_REGNUM)
++ return group == system_reggroup;
++
++ /* Force the FPCR to be considered part of the floating point state. */
++ if (regnum == SW64_FPCR_REGNUM)
++ return group == float_reggroup;
++
++ if (regnum >= SW64_FP0_REGNUM && regnum < SW64_FP0_REGNUM + 31)
++ return group == float_reggroup;
++
++#ifndef LHX20240710
++ if (regnum >= SW64_VEC0_REGNUM && regnum < SW64_VEC0_REGNUM + 31)
++ return group == vector_reggroup;
++
++ if (regnum < SW64_ZERO_REGNUM )
++ return group == general_reggroup;
++ else
++ return 0;
++#endif
++}
++
++#ifndef LHX20240710
++static enum register_status
++sw64_vec_register_read (struct gdbarch *gdbarch, readable_regcache *regcache,
++ int regnum, gdb_byte *buf)
++{
++ //printf("[sw64_vec_register_read] REG: read $V%d\n", regnum);
++ int i, fpnum;
++ enum register_status status;
++ gdb_byte fp_buf[SW64_REGISTER_SIZE];
++ if (regnum >= SW64_VEC0_REGNUM && regnum < SW64_VEC0_REGNUM + 31)
++ {
++ fpnum = regnum - SW64_VEC0_REGNUM + SW64_FP0_REGNUM;
++ status = regcache->raw_read(fpnum, fp_buf);
++ if (status != REG_VALID)
++ return status;
++ memcpy (buf + 24, fp_buf, SW64_REGISTER_SIZE);
++ fpnum = regnum - SW64_VEC0_REGNUM + SW64_V0F3_REGNUM;
++ for (i = 0; i < 3; i++, fpnum -= 32) {
++ status = regcache->raw_read(fpnum, fp_buf);
++ if (status != REG_VALID)
++ return status;
++ memcpy (buf + (i << 3), fp_buf, SW64_REGISTER_SIZE);
++ }
++ }
++ return REG_VALID;
++}
++
++static void
++sw64_vec_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
++ int regnum, const gdb_byte *buf)
++{
++ //printf("[sw64_vec_register_write] REG: write $V%d\n", regnum);
++ if (regnum >= SW64_VEC0_REGNUM && regnum < SW64_VEC0_REGNUM + 31) {
++ int i, fpnum;
++
++ fpnum = regnum - SW64_VEC0_REGNUM + SW64_FP0_REGNUM;
++ regcache->raw_write (fpnum, buf+24);
++
++ fpnum = regnum - SW64_VEC0_REGNUM + SW64_V0F3_REGNUM;
++ for (i = 0; i < 3; i++, fpnum -= 32 ) {
++ regcache->raw_write (fpnum, buf + (i << 3));
++ }
++ }
++}
++#endif
++
++/* Read an instruction from memory at PC, looking through breakpoints. */
++/* n is power of 2, over 0 */
++#if 0
++unsigned int*
++sw64_read_insns (struct gdbarch *gdbarch, CORE_ADDR pc, int n)
++{
++ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
++
++ /* alloc from stack frame, freed on return */
++ gdb_byte *buf = (gdb_byte *)XALLOCAVEC(int, n);
++ unsigned int *ibuf = XCNEWVEC(unsigned int, n);
++ gdb_byte *p;
++ int res, i;
++
++ res = target_read_memory (pc, buf, sizeof (int)*n);
++ if (res != 0)
++ memory_error (TARGET_XFER_E_IO, pc);
++
++ n >>= 1;
++ for (i = 0, p = buf; i < n; i += 2)
++ {
++ extract_long_unsigned_integer(p, sizeof(long), byte_order, (long *)&ibuf[i]);
++ p += sizeof(long);
++ }
++ return ibuf;
++}
++#endif
++
++/* The following represents exactly the conversion performed by
++ the LDS instruction. This applies to both single-precision
++ floating point and 32-bit integers. */
++
++static void
++sw64_lds (struct gdbarch *gdbarch, void *out, const void *in)
++{
++ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
++ ULONGEST mem
++ = extract_unsigned_integer ((const gdb_byte *) in, 4, byte_order);
++ ULONGEST frac = (mem >> 0) & 0x7fffff;
++ ULONGEST sign = (mem >> 31) & 1;
++ ULONGEST exp_msb = (mem >> 30) & 1;
++ ULONGEST exp_low = (mem >> 23) & 0x7f;
++ ULONGEST exp, reg;
++
++ exp = (exp_msb << 10) | exp_low;
++ if (exp_msb)
++ {
++ if (exp_low == 0x7f)
++ exp = 0x7ff;
++ }
++ else
++ {
++ if (exp_low != 0x00)
++ exp |= 0x380;
++ }
++
++ reg = (sign << 63) | (exp << 52) | (frac << 29);
++ store_unsigned_integer ((gdb_byte *) out, 8, byte_order, reg);
++}
++
++/* Similarly, this represents exactly the conversion performed by
++ the STS instruction. */
++
++static void
++sw64_sts (struct gdbarch *gdbarch, void *out, const void *in)
++{
++ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
++ ULONGEST reg, mem;
++
++ reg = extract_unsigned_integer ((const gdb_byte *) in, 8, byte_order);
++ mem = ((reg >> 32) & 0xc0000000) | ((reg >> 29) & 0x3fffffff);
++ store_unsigned_integer ((gdb_byte *) out, 4, byte_order, mem);
++}
++
++/* The sw64 needs a conversion between register and memory format if the
++ register is a floating point register and memory format is float, as the
++ register format must be double or memory format is an integer with 4
++ bytes, as the representation of integers in floating point
++ registers is different. */
++
++static int
++sw64_convert_register_p (struct gdbarch *gdbarch, int regno,
++ struct type *type)
++{
++ return (regno >= SW64_FP0_REGNUM && regno < SW64_FP0_REGNUM + 31
++ && type->length () == 4);
++}
++
++static int
++sw64_register_to_value (frame_info_ptr frame, int regnum,
++ struct type *valtype, gdb_byte *out,
++ int *optimizedp, int *unavailablep)
++{
++ struct gdbarch *gdbarch = get_frame_arch (frame);
++ struct value *value = get_frame_register_value (frame, regnum);
++
++ gdb_assert (value != NULL);
++ *optimizedp = value->optimized_out ();
++ *unavailablep = !value->entirely_available ();
++
++ if (*optimizedp || *unavailablep)
++ {
++ release_value (value);
++ return 0;
++ }
++
++ /* Convert to VALTYPE. */
++
++ gdb_assert (valtype->length () == 4);
++ sw64_sts (gdbarch, out, value->contents_all ().data ());
++
++ release_value (value);
++ return 1;
++}
++
++static void
++sw64_value_to_register (frame_info_ptr frame, int regnum,
++ struct type *valtype, const gdb_byte *in)
++{
++ gdb_byte out[SW64_REGISTER_SIZE];
++
++ gdb_assert (valtype->length () == 4);
++ gdb_assert (register_size (get_frame_arch (frame), regnum)
++ <= SW64_REGISTER_SIZE);
++ sw64_lds (get_frame_arch (frame), out, in);
++
++ put_frame_register (frame, regnum, out);
++}
++
++
++/* The sw64 passes the first six arguments in the registers, the rest on
++ the stack. The register arguments are stored in ARG_REG_BUFFER, and
++ then moved into the register file; this simplifies the passing of a
++ large struct which extends from the registers to the stack, plus avoids
++ three ptrace invocations per word.
++
++ We don't bother tracking which register values should go in integer
++ regs or fp regs; we load the same values into both.
++
++ If the called function is returning a structure, the address of the
++ structure to be returned is passed as a hidden first argument. */
++
++static CORE_ADDR
++sw64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
++ struct regcache *regcache, CORE_ADDR bp_addr,
++ int nargs, struct value **args, CORE_ADDR sp,
++ function_call_return_method return_method,
++ CORE_ADDR struct_addr)
++{
++ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
++ int i;
++ int accumulate_size = (return_method == return_method_struct) ? 8 : 0;
++ struct sw64_arg
++ {
++ const gdb_byte *contents;
++ int len;
++ int offset;
++ };
++ struct sw64_arg *sw64_args = XALLOCAVEC (struct sw64_arg, nargs);
++ struct sw64_arg *m_arg;
++ gdb_byte arg_reg_buffer[SW64_REGISTER_SIZE * SW64_NUM_ARG_REGS];
++ int required_arg_regs;
++ CORE_ADDR func_addr = find_function_addr (function, NULL);
++
++ /* The ABI places the address of the called function in T12. */
++ regcache_cooked_write_signed (regcache, SW64_T12_REGNUM, func_addr);
++
++ /* Set the return address register to point to the entry point
++ of the program, where a breakpoint lies in wait. */
++ regcache_cooked_write_signed (regcache, SW64_RA_REGNUM, bp_addr);
++
++ /* Lay out the arguments in memory. */
++ for (i = 0, m_arg = sw64_args; i < nargs; i++, m_arg++)
++ {
++ struct value *arg = args[i];
++ struct type *arg_type = check_typedef (arg->type ());
++
++ /* Cast argument to long if necessary as the compiler does it too. */
++ switch (arg_type->code ())
++ {
++ case TYPE_CODE_INT:
++ case TYPE_CODE_BOOL:
++ case TYPE_CODE_CHAR:
++ case TYPE_CODE_RANGE:
++ case TYPE_CODE_ENUM:
++ if (arg_type->length () == 4)
++ {
++ /* 32-bit values must be sign-extended to 64 bits
++ even if the base data type is unsigned. */
++ arg_type = builtin_type (gdbarch)->builtin_int32;
++ arg = value_cast (arg_type, arg);
++ }
++ if (arg_type->length () < SW64_REGISTER_SIZE)
++ {
++ arg_type = builtin_type (gdbarch)->builtin_int64;
++ arg = value_cast (arg_type, arg);
++ }
++ break;
++
++ case TYPE_CODE_FLT:
++ /* "float" arguments loaded in registers must be passed in
++ register format, aka "double". */
++ if (accumulate_size < sizeof (arg_reg_buffer)
++ && arg_type->length () == 4)
++ {
++ arg_type = builtin_type (gdbarch)->builtin_double;
++ arg = value_cast (arg_type, arg);
++ }
++ /* Tru64 5.1 has a 128-bit long double, and passes this by
++ invisible reference. No one else uses this data type. */
++ else if (arg_type->length () == 16)
++ {
++ /* Allocate aligned storage. */
++ sp = (sp & -16) - 16;
++
++ /* Write the real data into the stack. */
++ write_memory (sp, arg->contents ().data (), 16);
++
++ /* Construct the indirection. */
++ arg_type = lookup_pointer_type (arg_type);
++ arg = value_from_pointer (arg_type, sp);
++ }
++ break;
++
++ case TYPE_CODE_COMPLEX:
++ /* ??? The ABI says that complex values are passed as two
++ separate scalar values. This distinction only matters
++ for complex float. However, GCC does not implement this. */
++
++ /* Tru64 5.1 has a 128-bit long double, and passes this by
++ invisible reference. */
++ if (arg_type->length () == 32)
++ {
++ /* Allocate aligned storage. */
++ sp = (sp & -16) - 16;
++
++ /* Write the real data into the stack. */
++ write_memory (sp, arg->contents ().data (), 32);
++
++ /* Construct the indirection. */
++ arg_type = lookup_pointer_type (arg_type);
++ arg = value_from_pointer (arg_type, sp);
++ }
++ break;
++
++ default:
++ break;
++ }
++ m_arg->len = arg_type->length ();
++ m_arg->offset = accumulate_size;
++ accumulate_size = (accumulate_size + m_arg->len + 7) & ~7;
++ m_arg->contents = arg->contents ().data ();
++ }
++
++ /* Determine required argument register loads, loading an argument register
++ is expensive as it uses three ptrace calls. */
++ required_arg_regs = accumulate_size / 8;
++ if (required_arg_regs > SW64_NUM_ARG_REGS)
++ required_arg_regs = SW64_NUM_ARG_REGS;
++
++ /* Make room for the arguments on the stack. */
++ if (accumulate_size < sizeof(arg_reg_buffer))
++ accumulate_size = 0;
++ else
++ accumulate_size -= sizeof(arg_reg_buffer);
++ sp -= accumulate_size;
++
++ /* Keep sp aligned to a multiple of 16 as the ABI requires. */
++ sp &= ~15;
++
++ /* `Push' arguments on the stack. */
++ for (i = nargs; m_arg--, --i >= 0;)
++ {
++ const gdb_byte *contents = m_arg->contents;
++ int offset = m_arg->offset;
++ int len = m_arg->len;
++
++ /* Copy the bytes destined for registers into arg_reg_buffer. */
++ if (offset < sizeof(arg_reg_buffer))
++ {
++ if (offset + len <= sizeof(arg_reg_buffer))
++ {
++ memcpy (arg_reg_buffer + offset, contents, len);
++ continue;
++ }
++ else
++ {
++ int tlen = sizeof(arg_reg_buffer) - offset;
++ memcpy (arg_reg_buffer + offset, contents, tlen);
++ offset += tlen;
++ contents += tlen;
++ len -= tlen;
++ }
++ }
++
++ /* Everything else goes to the stack. */
++ write_memory (sp + offset - sizeof(arg_reg_buffer), contents, len);
++ }
++ if (return_method == return_method_struct)
++ store_unsigned_integer (arg_reg_buffer, SW64_REGISTER_SIZE,
++ byte_order, struct_addr);
++
++ /* Load the argument registers. */
++ for (i = 0; i < required_arg_regs; i++)
++ {
++ regcache->cooked_write (SW64_A0_REGNUM + i,
++ arg_reg_buffer + i * SW64_REGISTER_SIZE);
++ regcache->cooked_write (SW64_FPA0_REGNUM + i,
++ arg_reg_buffer + i * SW64_REGISTER_SIZE);
++ }
++
++ /* Finally, update the stack pointer. */
++ regcache_cooked_write_signed (regcache, SW64_SP_REGNUM, sp);
++
++ return sp;
++}
++
++/* Extract from REGCACHE the value about to be returned from a function
++ and copy it into VALBUF. */
++
++static void
++sw64_extract_return_value (struct type *valtype, struct regcache *regcache,
++ gdb_byte *valbuf)
++{
++ struct gdbarch *gdbarch = regcache->arch ();
++ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
++ gdb_byte raw_buffer[SW64_REGISTER_SIZE];
++ ULONGEST l;
++
++ switch (valtype->code ())
++ {
++ case TYPE_CODE_FLT:
++ switch (valtype->length ())
++ {
++ case 4:
++ regcache->cooked_read (SW64_FP0_REGNUM, raw_buffer);
++ sw64_sts (gdbarch, valbuf, raw_buffer);
++ break;
++
++ case 8:
++ regcache->cooked_read (SW64_FP0_REGNUM, valbuf);
++ break;
++
++ case 16:
++ regcache_cooked_read_unsigned (regcache, SW64_V0_REGNUM, &l);
++ read_memory (l, valbuf, 16);
++ break;
++
++ default:
++ internal_error (_("unknown floating point width"));
++ }
++ break;
++
++ case TYPE_CODE_COMPLEX:
++ switch (valtype->length ())
++ {
++ case 8:
++ /* ??? This isn't correct wrt the ABI, but it's what GCC does. */
++ regcache->cooked_read (SW64_FP0_REGNUM, valbuf);
++ break;
++
++ case 16:
++ regcache->cooked_read (SW64_FP0_REGNUM, valbuf);
++ regcache->cooked_read (SW64_FP0_REGNUM + 1, valbuf + 8);
++ break;
++
++ case 32:
++ regcache_cooked_read_unsigned (regcache, SW64_V0_REGNUM, &l);
++ read_memory (l, valbuf, 32);
++ break;
++
++ default:
++ internal_error (_("unknown floating point width"));
++ }
++ break;
++
++ default:
++ /* Assume everything else degenerates to an integer. */
++ regcache_cooked_read_unsigned (regcache, SW64_V0_REGNUM, &l);
++ store_unsigned_integer (valbuf, valtype->length (), byte_order, l);
++ break;
++ }
++}
++
++/* Insert the given value into REGCACHE as if it was being
++ returned by a function. */
++
++static void
++sw64_store_return_value (struct type *valtype, struct regcache *regcache,
++ const gdb_byte *valbuf)
++{
++ struct gdbarch *gdbarch = regcache->arch ();
++ gdb_byte raw_buffer[SW64_REGISTER_SIZE];
++ ULONGEST l;
++
++ switch (valtype->code ())
++ {
++ case TYPE_CODE_FLT:
++ switch (valtype->length ())
++ {
++ case 4:
++ sw64_lds (gdbarch, raw_buffer, valbuf);
++ regcache->cooked_write (SW64_FP0_REGNUM, raw_buffer);
++ break;
++
++ case 8:
++ regcache->cooked_write (SW64_FP0_REGNUM, valbuf);
++ break;
++
++ case 16:
++ /* FIXME: 128-bit long doubles are returned like structures:
++ by writing into indirect storage provided by the caller
++ as the first argument. */
++ error (_("Cannot set a 128-bit long double return value."));
++
++ default:
++ internal_error (_("unknown floating point width"));
++ }
++ break;
++
++ case TYPE_CODE_COMPLEX:
++ switch (valtype->length ())
++ {
++ case 8:
++ /* ??? This isn't correct wrt the ABI, but it's what GCC does. */
++ regcache->cooked_write (SW64_FP0_REGNUM, valbuf);
++ break;
++
++ case 16:
++ regcache->cooked_write (SW64_FP0_REGNUM, valbuf);
++ regcache->cooked_write (SW64_FP0_REGNUM + 1, valbuf + 8);
++ break;
++
++ case 32:
++ /* FIXME: 128-bit long doubles are returned like structures:
++ by writing into indirect storage provided by the caller
++ as the first argument. */
++ error (_("Cannot set a 128-bit long double return value."));
++
++ default:
++ internal_error (_("unknown floating point width"));
++ }
++ break;
++
++ default:
++ /* Assume everything else degenerates to an integer. */
++ /* 32-bit values must be sign-extended to 64 bits
++ even if the base data type is unsigned. */
++ if (valtype->length () == 4)
++ valtype = builtin_type (gdbarch)->builtin_int32;
++ l = unpack_long (valtype, valbuf);
++ regcache_cooked_write_unsigned (regcache, SW64_V0_REGNUM, l);
++ break;
++ }
++}
++
++static enum return_value_convention
++sw64_return_value (struct gdbarch *gdbarch, struct value *function,
++ struct type *type, struct regcache *regcache,
++ gdb_byte *readbuf, const gdb_byte *writebuf)
++{
++ enum type_code code = type->code ();
++ sw64_gdbarch_tdep *tdep = gdbarch_tdep<sw64_gdbarch_tdep> (gdbarch);
++
++ if ((code == TYPE_CODE_STRUCT
++ || code == TYPE_CODE_UNION
++ || code == TYPE_CODE_ARRAY)
++ && tdep->return_in_memory (type))
++ {
++ if (readbuf)
++ {
++ ULONGEST addr;
++ regcache_raw_read_unsigned (regcache, SW64_V0_REGNUM, &addr);
++ read_memory (addr, readbuf, type->length ());
++ }
++
++ return RETURN_VALUE_ABI_RETURNS_ADDRESS;
++ }
++
++ if (readbuf)
++ sw64_extract_return_value (type, regcache, readbuf);
++ if (writebuf)
++ sw64_store_return_value (type, regcache, writebuf);
++
++ return RETURN_VALUE_REGISTER_CONVENTION;
++}
++
++static int
++sw64_return_in_memory_always (struct type *type)
++{
++ return 1;
++}
++
++
++constexpr gdb_byte sw64_break_insn[] = { 0x80, 0, 0, 0 }; /* call_pal bpt */
++
++typedef BP_MANIPULATION (sw64_break_insn) sw64_breakpoint;
++
++
++/* This returns the PC of the first insn after the prologue.
++ If we can't find the prologue, then return 0. */
++
++CORE_ADDR
++sw64_after_prologue (CORE_ADDR pc)
++{
++ struct symtab_and_line sal;
++ CORE_ADDR func_addr, func_end;
++
++ if (!find_pc_partial_function (pc, NULL, &func_addr, &func_end))
++ return 0;
++
++ sal = find_pc_line (func_addr, 0);
++ if (sal.end < func_end)
++ return sal.end;
++
++ /* The line after the prologue is after the end of the function. In this
++ case, tell the caller to find the prologue the hard way. */
++ return 0;
++}
++
++/* Read an instruction from memory at PC, looking through breakpoints. */
++
++unsigned int
++sw64_read_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
++{
++ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
++ gdb_byte buf[SW64_INSN_SIZE];
++ int res;
++
++ res = target_read_memory (pc, buf, sizeof (buf));
++ if (res != 0)
++ memory_error (TARGET_XFER_E_IO, pc);
++ return extract_unsigned_integer (buf, sizeof (buf), byte_order);
++}
++
++/* To skip prologues, I use this predicate. Returns either PC itself
++ if the code at PC does not look like a function prologue; otherwise
++ returns an address that (if we're lucky) follows the prologue. If
++ LENIENT, then we must skip everything which is involved in setting
++ up the frame (it's OK to skip more, just so long as we don't skip
++ anything which might clobber the registers which are being saved. */
++
++static CORE_ADDR
++sw64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
++{
++ unsigned long inst;
++ int offset;
++ CORE_ADDR post_prologue_pc;
++ gdb_byte buf[SW64_INSN_SIZE];
++
++ /* Silently return the unaltered pc upon memory errors.
++ This could happen on OSF/1 if decode_line_1 tries to skip the
++ prologue for quickstarted shared library functions when the
++ shared library is not yet mapped in.
++ Reading target memory is slow over serial lines, so we perform
++ this check only if the target has shared libraries (which all
++ SW64 targets do). */
++ if (target_read_memory (pc, buf, sizeof (buf)))
++ return pc;
++
++ /* See if we can determine the end of the prologue via the symbol table.
++ If so, then return either PC, or the PC after the prologue, whichever
++ is greater. */
++
++ post_prologue_pc = sw64_after_prologue (pc);
++ if (post_prologue_pc != 0)
++ return std::max (pc, post_prologue_pc);
++
++ /* Can't determine prologue from the symbol table, need to examine
++ instructions. */
++
++ /* Skip the typical prologue instructions. These are the stack adjustment
++ instruction and the instructions that save registers on the stack
++ or in the gcc frame. */
++ for (offset = 0; offset < 100; offset += SW64_INSN_SIZE)
++ {
++ inst = sw64_read_insn (gdbarch, pc + offset);
++#ifndef LHX20240710
++ if ((inst & 0xffff0000) == 0xffbb0000) /* ldih $gp,n($t12) */
++ continue;
++ if ((inst & 0xffff0000) == 0xfbbd0000) /* ldi $gp,n($gp) */
++ continue;
++ if ((inst & 0xffff0000) == 0x8f7d0000) /* ldl t12,n($gp) */
++ continue;
++
++ if ((inst & 0xfffff000) == 0xfbde8000) /* ldi $sp,-n($sp) */
++ continue;
++ if ((inst & 0xffe01fff) == 0x43c0153e) /* subq $sp,n,$sp */
++ continue;
++
++ if ((inst & 0xfc1f0000) == 0xac1e0000) /* stl reg,n($sp) */
++ continue;
++
++ if (inst == 0x43de074f) /* bis sp,sp,fp */
++ continue;
++ if (inst == 0x43fe074f) /* bis zero,sp,fp */
++ continue;
++#endif
++ break;
++ }
++ return pc + offset;
++}
++
++
++#ifndef LHX20240710
++static const int ldq_l_opcode = 0x08; //lldl opcode
++static const int ldq_l_func = 0x01; //lldl function
++
++static const int ldl_l_opcode = 0x08; //lldw opcode
++static const int ldl_l_func = 0x00; //lldw function
++
++static const int stl_c_opcode = 0x08; //lstw opcode
++static const int stl_c_func = 0x08; //lstw function
++
++static const int stq_c_opcode = 0x08; //lstl opcode
++static const int stq_c_func = 0x09; //lstl function
++#endif
++
++/* Checks for an atomic sequence of instructions beginning with a LDL_L/LDQ_L
++ instruction and ending with a STL_C/STQ_C instruction. If such a sequence
++ is found, attempt to step through it. A breakpoint is placed at the end of
++ the sequence. */
++
++static std::vector<CORE_ADDR>
++sw64_deal_with_atomic_sequence (struct gdbarch *gdbarch, CORE_ADDR pc)
++{
++ CORE_ADDR breaks[2] = {CORE_ADDR_MAX, CORE_ADDR_MAX};
++ CORE_ADDR loc = pc;
++ unsigned int insn = sw64_read_insn (gdbarch, loc);
++ int insn_count;
++ int index;
++ int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
++ const int atomic_sequence_length = 16; /* Instruction sequence length. */
++
++ /* Assume all atomic sequences start with a LDL_L/LDQ_L instruction. */
++#ifndef LHX20240710
++ if((INSN_OPCODE (insn) == ldl_l_opcode && (INSN_FUNC (insn) == ldl_l_func)) ||
++ (INSN_OPCODE (insn) == ldq_l_opcode && (INSN_FUNC (insn) == ldq_l_func)))
++ ; // do nothing, continue
++ else
++ return {};
++#endif
++
++ /* Assume that no atomic sequence is longer than "atomic_sequence_length"
++ instructions. */
++ for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
++ {
++ loc += SW64_INSN_SIZE;
++ insn = sw64_read_insn (gdbarch, loc);
++
++ /* Assume that there is at most one branch in the atomic
++ sequence. If a branch is found, put a breakpoint in
++ its destination address. */
++#if 0 // There is no branch in the atomic sequence in sw64
++ if (INSN_OPCODE (insn) >= br_opcode)
++ {
++ int immediate = (insn & 0x001fffff) << 2;
++
++ immediate = (immediate ^ 0x400000) - 0x400000;
++
++ if (bc_insn_count >= 1)
++ return {}; /* More than one branch found, fallback
++ to the standard single-step code. */
++
++ breaks[1] = loc + SW64_INSN_SIZE + immediate;
++
++ bc_insn_count++;
++ last_breakpoint++;
++ }
++#endif
++
++#ifndef LHX20240710
++ if((INSN_OPCODE (insn) == stl_c_opcode && (INSN_FUNC (insn) == stl_c_func))
++ || (INSN_OPCODE (insn) == stq_c_opcode && (INSN_FUNC (insn) == stq_c_func)))
++ break;
++#endif
++ }
++
++ /* Assume that the atomic sequence ends with a STL_C/STQ_C instruction. */
++#ifndef LHX20220407
++ if((INSN_OPCODE (insn) == stl_c_opcode && (INSN_FUNC (insn) == stl_c_func)) ||
++ (INSN_OPCODE (insn) == stq_c_opcode && (INSN_FUNC (insn) == stq_c_func)))
++ ; // do nothing, continue
++ else
++ return {};
++#endif
++
++#ifndef LHX20240710
++ loc += SW64_INSN_SIZE * 2; // also skip rd_f instruction which after lstw/lstl
++#endif
++
++ /* Insert a breakpoint right after the end of the atomic sequence. */
++ breaks[0] = loc;
++
++ /* Check for duplicated breakpoints. Check also for a breakpoint
++ placed (branch instruction's destination) anywhere in sequence. */
++ std::vector<CORE_ADDR> next_pcs;
++
++ for (index = 0; index <= last_breakpoint; index++)
++ next_pcs.push_back (breaks[index]);
++
++ return next_pcs;
++}
++
++#ifndef LHX20240710
++static void
++sw64_skip_permanent_breakpoint (struct regcache *regcache)
++{
++ CORE_ADDR current_pc = regcache_read_pc (regcache);
++ current_pc += 4;
++ regcache_write_pc (regcache, current_pc);
++}
++#endif
++
++
++/* Figure out where the longjmp will land.
++ We expect the first arg to be a pointer to the jmp_buf structure from
++ which we extract the PC (JB_PC) that we will land at. The PC is copied
++ into the "pc". This routine returns true on success. */
++
++static int
++sw64_get_longjmp_target (frame_info_ptr frame, CORE_ADDR *pc)
++{
++ struct gdbarch *gdbarch = get_frame_arch (frame);
++ sw64_gdbarch_tdep *tdep = gdbarch_tdep<sw64_gdbarch_tdep> (gdbarch);
++ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
++ CORE_ADDR jb_addr;
++ gdb_byte raw_buffer[SW64_REGISTER_SIZE];
++
++ jb_addr = get_frame_register_unsigned (frame, SW64_A0_REGNUM);
++
++ if (target_read_memory (jb_addr + (tdep->jb_pc * tdep->jb_elt_size),
++ raw_buffer, tdep->jb_elt_size))
++ return 0;
++
++ *pc = extract_unsigned_integer (raw_buffer, tdep->jb_elt_size, byte_order);
++ return 1;
++}
++
++
++/* Frame unwinder for signal trampolines. We use sw64 tdep bits that
++ describe the location and shape of the sigcontext structure. After
++ that, all registers are in memory, so it's easy. */
++/* ??? Shouldn't we be able to do this generically, rather than with
++ OSABI data specific to SW64? */
++
++struct sw64_sigtramp_unwind_cache
++{
++ CORE_ADDR sigcontext_addr;
++};
++
++static struct sw64_sigtramp_unwind_cache *
++sw64_sigtramp_frame_unwind_cache (frame_info_ptr this_frame,
++ void **this_prologue_cache)
++{
++ struct sw64_sigtramp_unwind_cache *info;
++
++ if (*this_prologue_cache)
++ return (struct sw64_sigtramp_unwind_cache *) *this_prologue_cache;
++
++ info = FRAME_OBSTACK_ZALLOC (struct sw64_sigtramp_unwind_cache);
++ *this_prologue_cache = info;
++
++ gdbarch *arch = get_frame_arch (this_frame);
++ sw64_gdbarch_tdep *tdep = gdbarch_tdep<sw64_gdbarch_tdep> (arch);
++ info->sigcontext_addr = tdep->sigcontext_addr (this_frame);
++
++ return info;
++}
++
++/* Return the address of REGNUM in a sigtramp frame. Since this is
++ all arithmetic, it doesn't seem worthwhile to cache it. */
++
++static CORE_ADDR
++sw64_sigtramp_register_address (struct gdbarch *gdbarch,
++ CORE_ADDR sigcontext_addr, int regnum)
++{
++ sw64_gdbarch_tdep *tdep = gdbarch_tdep<sw64_gdbarch_tdep> (gdbarch);
++
++ if (regnum >= 0 && regnum < 32)
++ return sigcontext_addr + tdep->sc_regs_offset + regnum * 8;
++ else if (regnum >= SW64_FP0_REGNUM && regnum < SW64_FP0_REGNUM + 32)
++ return sigcontext_addr + tdep->sc_fpregs_offset + regnum * 8;
++ else if (regnum == SW64_PC_REGNUM)
++ return sigcontext_addr + tdep->sc_pc_offset;
++
++ return 0;
++}
++
++/* Given a GDB frame, determine the address of the calling function's
++ frame. This will be used to create a new GDB frame struct. */
++
++static void
++sw64_sigtramp_frame_this_id (frame_info_ptr this_frame,
++ void **this_prologue_cache,
++ struct frame_id *this_id)
++{
++ struct gdbarch *gdbarch = get_frame_arch (this_frame);
++ sw64_gdbarch_tdep *tdep = gdbarch_tdep<sw64_gdbarch_tdep> (gdbarch);
++ struct sw64_sigtramp_unwind_cache *info
++ = sw64_sigtramp_frame_unwind_cache (this_frame, this_prologue_cache);
++ CORE_ADDR stack_addr, code_addr;
++
++ /* If the OSABI couldn't locate the sigcontext, give up. */
++ if (info->sigcontext_addr == 0)
++ return;
++
++ /* If we have dynamic signal trampolines, find their start.
++ If we do not, then we must assume there is a symbol record
++ that can provide the start address. */
++ if (tdep->dynamic_sigtramp_offset)
++ {
++ int offset;
++ code_addr = get_frame_pc (this_frame);
++ offset = tdep->dynamic_sigtramp_offset (gdbarch, code_addr);
++ if (offset >= 0)
++ code_addr -= offset;
++ else
++ code_addr = 0;
++ }
++ else
++ code_addr = get_frame_func (this_frame);
++
++ /* The stack address is trivially read from the sigcontext. */
++ stack_addr = sw64_sigtramp_register_address (gdbarch, info->sigcontext_addr,
++ SW64_SP_REGNUM);
++ stack_addr = get_frame_memory_unsigned (this_frame, stack_addr,
++ SW64_REGISTER_SIZE);
++
++ *this_id = frame_id_build (stack_addr, code_addr);
++}
++
++/* Retrieve the value of REGNUM in FRAME. Don't give up! */
++
++static struct value *
++sw64_sigtramp_frame_prev_register (frame_info_ptr this_frame,
++ void **this_prologue_cache, int regnum)
++{
++ struct sw64_sigtramp_unwind_cache *info
++ = sw64_sigtramp_frame_unwind_cache (this_frame, this_prologue_cache);
++ CORE_ADDR addr;
++
++ if (info->sigcontext_addr != 0)
++ {
++ /* All integer and fp registers are stored in memory. */
++ addr = sw64_sigtramp_register_address (get_frame_arch (this_frame),
++ info->sigcontext_addr, regnum);
++ if (addr != 0)
++ return frame_unwind_got_memory (this_frame, regnum, addr);
++ }
++
++ /* This extra register may actually be in the sigcontext, but our
++ current description of it in sw64_sigtramp_frame_unwind_cache
++ doesn't include it. Too bad. Fall back on whatever's in the
++ outer frame. */
++ return frame_unwind_got_register (this_frame, regnum, regnum);
++}
++
++static int
++sw64_sigtramp_frame_sniffer (const struct frame_unwind *self,
++ frame_info_ptr this_frame,
++ void **this_prologue_cache)
++{
++ struct gdbarch *gdbarch = get_frame_arch (this_frame);
++ CORE_ADDR pc = get_frame_pc (this_frame);
++ const char *name;
++
++ /* NOTE: cagney/2004-04-30: Do not copy/clone this code. Instead
++ look at tramp-frame.h and other simpler per-architecture
++ sigtramp unwinders. */
++
++ /* We shouldn't even bother to try if the OSABI didn't register a
++ sigcontext_addr handler or pc_in_sigtramp handler. */
++ sw64_gdbarch_tdep *tdep = gdbarch_tdep<sw64_gdbarch_tdep> (gdbarch);
++ if (tdep->sigcontext_addr == NULL)
++ return 0;
++
++ if (tdep->pc_in_sigtramp == NULL)
++ return 0;
++
++ /* Otherwise we should be in a signal frame. */
++ find_pc_partial_function (pc, &name, NULL, NULL);
++ if (tdep->pc_in_sigtramp (gdbarch, pc, name))
++ return 1;
++
++ return 0;
++}
++
++static const struct frame_unwind sw64_sigtramp_frame_unwind =
++{
++ "sw64 sigtramp",
++ SIGTRAMP_FRAME,
++ default_frame_unwind_stop_reason,
++ sw64_sigtramp_frame_this_id,
++ sw64_sigtramp_frame_prev_register,
++ NULL,
++ sw64_sigtramp_frame_sniffer
++};
++
++
++
++/* Heuristic_proc_start may hunt through the text section for a long
++ time across a 2400 baud serial line. Allows the user to limit this
++ search. */
++#ifndef LHX20240710
++static int heuristic_fence_post = 40;
++#else
++static int heuristic_fence_post = 0;
++#endif
++
++/* Attempt to locate the start of the function containing PC. We assume that
++ the previous function ends with an about_to_return insn. Not foolproof by
++ any means, since gcc is happy to put the epilogue in the middle of a
++ function. But we're guessing anyway... */
++
++static CORE_ADDR
++sw64_heuristic_proc_start (struct gdbarch *gdbarch, CORE_ADDR pc)
++{
++ sw64_gdbarch_tdep *tdep = gdbarch_tdep<sw64_gdbarch_tdep> (gdbarch);
++ CORE_ADDR last_non_nop = pc;
++ CORE_ADDR fence = pc - heuristic_fence_post;
++ CORE_ADDR orig_pc = pc;
++ CORE_ADDR func;
++ struct inferior *inf;
++
++ if (pc == 0)
++ return 0;
++
++ /* First see if we can find the start of the function from minimal
++ symbol information. This can succeed with a binary that doesn't
++ have debug info, but hasn't been stripped. */
++ func = get_pc_function_start (pc);
++ if (func)
++ return func;
++
++ if (heuristic_fence_post == -1
++ || fence < tdep->vm_min_address)
++ fence = tdep->vm_min_address;
++
++ /* Search back for previous return; also stop at a 0, which might be
++ seen for instance before the start of a code section. Don't include
++ nops, since this usually indicates padding between functions. */
++#ifdef LHX20240710
++ for (pc -= SW64_INSN_SIZE; pc >= fence; pc -= SW64_INSN_SIZE)
++ {
++ unsigned int insn = sw64_read_insn (gdbarch, pc);
++ switch (insn)
++ {
++ case 0: /* invalid insn */
++ case 0x6bfa8001: /* ret $31,($26),1 */
++ return last_non_nop;
++
++ case 0x2ffe0000: /* unop: ldq_u $31,0($30) */
++ case 0x47ff041f: /* nop: bis $31,$31,$31 */
++ break;
++
++ default:
++ last_non_nop = pc;
++ break;
++ }
++ }
++#else
++ for (pc -= SW64_INSN_SIZE; pc >= fence; pc -= SW64_INSN_SIZE)
++ {
++ unsigned int insn = sw64_read_insn (gdbarch, pc);
++
++ switch (insn)
++ {
++ case 0: /* invalid insn */
++ case 0x6bfa8001: /* ret $31,($26),1 */
++ case 0x0bfa0001: /* mater ret */
++ return last_non_nop;
++
++ case 0x2ffe0000: /* unop: ldq_u $31,0($30) */
++ case 0x47ff041f: /* nop: bis $31,$31,$31 */
++ break;
++
++ default:
++ last_non_nop = pc;
++ break;
++ }
++ }
++#endif
++
++ inf = current_inferior ();
++
++ /* It's not clear to me why we reach this point when stopping quietly,
++ but with this test, at least we don't print out warnings for every
++ child forked (eg, on decstation). 22apr93 rich@cygnus.com. */
++ if (inf->control.stop_soon == NO_STOP_QUIETLY)
++ {
++ static int blurb_printed = 0;
++
++ if (fence == tdep->vm_min_address)
++ warning (_("Hit beginning of text section without finding \
++enclosing function for address %s"), paddress (gdbarch, orig_pc));
++ else
++ warning (_("Hit heuristic-fence-post without finding \
++enclosing function for address %s"), paddress (gdbarch, orig_pc));
++
++ if (!blurb_printed)
++ {
++ gdb_printf (_("\
++This warning occurs if you are debugging a function without any symbols\n\
++(for example, in a stripped executable). In that case, you may wish to\n\
++increase the size of the search with the `set heuristic-fence-post' command.\n\
++\n\
++Otherwise, you told GDB there was a function where there isn't one, or\n\
++(more likely) you have encountered a bug in GDB.\n"));
++ blurb_printed = 1;
++ }
++ }
++
++ return 0;
++}
++
++/* Fallback sw64 frame unwinder. Uses instruction scanning and knows
++ something about the traditional layout of sw64 stack frames. */
++
++struct sw64_heuristic_unwind_cache
++{
++ CORE_ADDR vfp;
++ CORE_ADDR start_pc;
++ struct trad_frame_saved_reg *saved_regs;
++ int return_reg;
++};
++
++/* If a probing loop sequence starts at PC, simulate it and compute
++ FRAME_SIZE and PC after its execution. Otherwise, return with PC and
++ FRAME_SIZE unchanged. */
++
++static void
++sw64_heuristic_analyze_probing_loop (struct gdbarch *gdbarch, CORE_ADDR *pc,
++ int *frame_size)
++{
++ CORE_ADDR cur_pc = *pc;
++ int cur_frame_size = *frame_size;
++ int nb_of_iterations, reg_index, reg_probe;
++ unsigned int insn;
++
++ /* The following pattern is recognized as a probing loop:
++
++ lda REG_INDEX,NB_OF_ITERATIONS
++ lda REG_PROBE,(sp)
++
++ LOOP_START:
++ stq zero,(REG_PROBE)
++ subq REG_INDEX,0x1,REG_INDEX
++ lda REG_PROBE,(REG_PROBE)
++ bne REG_INDEX, LOOP_START
++
++ lda sp,(REG_PROBE)
++
++ If anything different is found, the function returns without
++ changing PC and FRAME_SIZE. Otherwise, PC will point immediately
++ after this sequence, and FRAME_SIZE will be updated. */
++
++ /* lda REG_INDEX,NB_OF_ITERATIONS */
++
++ insn = sw64_read_insn (gdbarch, cur_pc);
++ if (INSN_OPCODE (insn) != lda_opcode)
++ return;
++ reg_index = MEM_RA (insn);
++ nb_of_iterations = MEM_DISP (insn);
++
++ /* lda REG_PROBE,(sp) */
++
++ cur_pc += SW64_INSN_SIZE;
++ insn = sw64_read_insn (gdbarch, cur_pc);
++ if (INSN_OPCODE (insn) != lda_opcode
++ || MEM_RB (insn) != SW64_SP_REGNUM)
++ return;
++ reg_probe = MEM_RA (insn);
++ cur_frame_size -= MEM_DISP (insn);
++
++ /* stq zero,(REG_PROBE) */
++
++ cur_pc += SW64_INSN_SIZE;
++ insn = sw64_read_insn (gdbarch, cur_pc);
++ if (INSN_OPCODE (insn) != stq_opcode
++ || MEM_RA (insn) != 0x1f
++ || MEM_RB (insn) != reg_probe)
++ return;
++
++ /* subq REG_INDEX,0x1,REG_INDEX */
++
++ cur_pc += SW64_INSN_SIZE;
++ insn = sw64_read_insn (gdbarch, cur_pc);
++ if (INSN_OPCODE (insn) != subq_opcode
++ || !OPR_HAS_IMMEDIATE (insn)
++ || OPR_FUNCTION (insn) != subq_function
++ || OPR_LIT(insn) != 1
++ || OPR_RA (insn) != reg_index
++ || OPR_RC (insn) != reg_index)
++ return;
++
++ /* lda REG_PROBE,(REG_PROBE) */
++
++ cur_pc += SW64_INSN_SIZE;
++ insn = sw64_read_insn (gdbarch, cur_pc);
++ if (INSN_OPCODE (insn) != lda_opcode
++ || MEM_RA (insn) != reg_probe
++ || MEM_RB (insn) != reg_probe)
++ return;
++ cur_frame_size -= MEM_DISP (insn) * nb_of_iterations;
++
++ /* bne REG_INDEX, LOOP_START */
++
++ cur_pc += SW64_INSN_SIZE;
++ insn = sw64_read_insn (gdbarch, cur_pc);
++ if (INSN_OPCODE (insn) != bne_opcode
++ || MEM_RA (insn) != reg_index)
++ return;
++
++ /* lda sp,(REG_PROBE) */
++
++ cur_pc += SW64_INSN_SIZE;
++ insn = sw64_read_insn (gdbarch, cur_pc);
++ if (INSN_OPCODE (insn) != lda_opcode
++ || MEM_RA (insn) != SW64_SP_REGNUM
++ || MEM_RB (insn) != reg_probe)
++ return;
++ cur_frame_size -= MEM_DISP (insn);
++
++ *pc = cur_pc;
++ *frame_size = cur_frame_size;
++}
++
++/* Build (or return the cached) heuristic unwind cache for THIS_FRAME.
++   Scans forward from START_PC (at most 50 insns) looking for the stack
++   allocation, register saves, frame-pointer setup and the return
++   address register.  START_PC of 0 means "locate the function start
++   heuristically from the frame PC".  */
++
++static struct sw64_heuristic_unwind_cache *
++sw64_heuristic_frame_unwind_cache (frame_info_ptr this_frame,
++ void **this_prologue_cache,
++ CORE_ADDR start_pc)
++{
++ struct gdbarch *gdbarch = get_frame_arch (this_frame);
++ struct sw64_heuristic_unwind_cache *info;
++ ULONGEST val;
++ CORE_ADDR limit_pc, cur_pc;
++ int frame_reg, frame_size, return_reg, reg;
++
++ /* Return the cache if it was already computed for this frame.  */
++ if (*this_prologue_cache)
++ return (struct sw64_heuristic_unwind_cache *) *this_prologue_cache;
++
++ info = FRAME_OBSTACK_ZALLOC (struct sw64_heuristic_unwind_cache);
++ *this_prologue_cache = info;
++ info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
++
++ limit_pc = get_frame_pc (this_frame);
++ if (start_pc == 0)
++ start_pc = sw64_heuristic_proc_start (gdbarch, limit_pc);
++ info->start_pc = start_pc;
++
++ frame_reg = SW64_SP_REGNUM;
++ frame_size = 0;
++ return_reg = -1;
++
++ /* If we've identified a likely place to start, do code scanning. */
++ if (start_pc != 0)
++ {
++ /* Limit the forward search to 50 instructions. */
++ if (start_pc + 200 < limit_pc)
++ limit_pc = start_pc + 200;
++
++ for (cur_pc = start_pc; cur_pc < limit_pc; cur_pc += SW64_INSN_SIZE)
++ {
++ unsigned int word = sw64_read_insn (gdbarch, cur_pc);
++
++ //if ((word & 0xffff0000) == 0x23de0000) /* lda $sp,n($sp) */
++ if ((word & 0xffff8000) == 0xfbde8000)
++ {
++ if (word & 0x8000)
++ {
++ /* Consider only the first stack allocation instruction
++ to contain the static size of the frame. */
++ if (frame_size == 0)
++ frame_size = (-word) & 0xffff;
++ }
++ else
++ {
++ /* Exit loop if a positive stack adjustment is found, which
++ usually means that the stack cleanup code in the function
++ epilogue is reached. */
++ break;
++ }
++ }
++ //else if ((word & 0xfc1f0000) == 0xb41e0000) /* stq reg,n($sp) */
++ else if ((word & 0xfc1f8000) == 0xac1e0000 /* stl reg,n($sp) , n>0 */
++ || (word & 0xfc1f8000) == 0xac0f0000) /* stl reg,n($fp) , n>0 */
++ {
++ reg = (word & 0x03e00000) >> 21;
++
++ /* Ignore this instruction if we have already encountered
++ an instruction saving the same register earlier in the
++ function code. The current instruction does not tell
++ us where the original value upon function entry is saved.
++ All it says is that the function we are scanning reused
++ that register for some computation of its own, and is now
++ saving its result. */
++ if (info->saved_regs[reg].is_addr ())
++ continue;
++
++ if (reg == 31)
++ continue;
++
++ /* Do not compute the address where the register was saved yet,
++ because we don't know yet if the offset will need to be
++ relative to $sp or $fp (we can not compute the address
++ relative to $sp if $sp is updated during the execution of
++ the current subroutine, for instance when doing some alloca).
++ So just store the offset for the moment, and compute the
++ address later when we know whether this frame has a frame
++ pointer or not. */
++ /* Hack: temporarily add one, so that the offset is non-zero
++ and we can tell which registers have save offsets below. */
++ info->saved_regs[reg].set_addr ((word & 0xffff) + 1);
++
++ /* Starting with OSF/1-3.2C, the system libraries are shipped
++ without local symbols, but they still contain procedure
++ descriptors without a symbol reference. GDB is currently
++ unable to find these procedure descriptors and uses
++ heuristic_proc_desc instead.
++ As some low level compiler support routines (__div*, __add*)
++ use a non-standard return address register, we have to
++ add some heuristics to determine the return address register,
++ or stepping over these routines will fail.
++ Usually the return address register is the first register
++ saved on the stack, but assembler optimization might
++ rearrange the register saves.
++ So we recognize only a few registers (t7, t9, ra) within
++ the procedure prologue as valid return address registers.
++ If we encounter a return instruction, we extract the
++ return address register from it.
++
++ FIXME: Rewriting GDB to access the procedure descriptors,
++ e.g. via the minimal symbol table, might obviate this
++ hack. */
++ if (return_reg == -1
++ && cur_pc < (start_pc + 80)
++ && (reg == SW64_T7_REGNUM
++ || reg == SW64_T9_REGNUM
++ || reg == SW64_RA_REGNUM))
++ return_reg = reg;
++ }
++ /* NOTE(review): the LHX20240710 guards below strip only the
++ `else if' header lines while leaving their bodies behind, so
++ the code is only well-formed while that macro stays
++ undefined -- confirm it is never defined by the build.  */
++#ifndef LHX20240710
++ else if ((word & 0xffe0ffff) == 0x0be00001) /* ret zero,reg,1 */
++#endif
++ return_reg = (word >> 16) & 0x1f;
++#ifndef LHX20240710
++ else if (word == 0x43de074f) /* bis sp,sp,fp */
++ frame_reg = SW64_GCC_FP_REGNUM;
++ else if (word == 0x43fe074f) /* bis zero,sp,fp */
++ frame_reg = SW64_GCC_FP_REGNUM;
++#endif
++ sw64_heuristic_analyze_probing_loop (gdbarch, &cur_pc, &frame_size);
++ }
++
++ /* If we haven't found a valid return address register yet, keep
++ searching in the procedure prologue. */
++ if (return_reg == -1)
++ {
++ while (cur_pc < (limit_pc + 80) && cur_pc < (start_pc + 80))
++ {
++ unsigned int word = sw64_read_insn (gdbarch, cur_pc);
++
++#ifndef LHX20240710
++ if ((word & 0xfc1f0000) ==0xac1e0000 /* stl reg,n($sp) */
++ || (word & 0xfc1f0000) == 0xac0f0000 /* stl reg,n($fp) */
++ || (word & 0xfc1f0000) == 0xa80f0000 /* stw reg,n($fp) */
++ || (word & 0xfc1f0000) == 0xa81e0000) /* stw reg,n($sp) */
++#endif
++ {
++ reg = (word & 0x03e00000) >> 21;
++ if (reg == SW64_T7_REGNUM
++ || reg == SW64_T9_REGNUM
++ || reg == SW64_RA_REGNUM)
++ {
++ return_reg = reg;
++ break;
++ }
++ }
++#ifndef LHX20240710
++ else if ((word & 0xffe0ffff) == 0x0be00001) /* ret zero,reg,1 */
++#endif
++ {
++ return_reg = (word >> 16) & 0x1f;
++ break;
++ }
++
++ cur_pc += SW64_INSN_SIZE;
++ }
++ }
++ }
++
++ /* Failing that, do default to the customary RA. */
++ if (return_reg == -1)
++ return_reg = SW64_RA_REGNUM;
++ info->return_reg = return_reg;
++
++ val = get_frame_register_unsigned (this_frame, frame_reg);
++ info->vfp = val + frame_size;
++
++ /* Convert offsets to absolute addresses. See above about adding
++ one to the offsets to make all detected offsets non-zero. */
++ for (reg = 0; reg < SW64_NUM_REGS; ++reg)
++ if (info->saved_regs[reg].is_addr ())
++ info->saved_regs[reg].set_addr (info->saved_regs[reg].addr ()
++ + val - 1);
++
++ /* The stack pointer of the previous frame is computed by popping
++ the current stack frame. */
++ if (!info->saved_regs[SW64_SP_REGNUM].is_addr ())
++ info->saved_regs[SW64_SP_REGNUM].set_value (info->vfp);
++
++#ifndef LHX20240710
++ /* NOTE(review): this assumes the frame-pointer register was itself
++ saved (so its slot has a valid address) whenever frame_reg is not
++ $sp -- TODO confirm against prologues that set up $fp without
++ saving it.  */
++ if (frame_reg != SW64_SP_REGNUM )
++ info->saved_regs[SW64_SP_REGNUM].set_addr (info->saved_regs[frame_reg].addr());
++#endif
++
++ return info;
++}
++
++/* Given a GDB frame, determine the address of the calling function's
++ frame. This will be used to create a new GDB frame struct. */
++
++static void
++sw64_heuristic_frame_this_id (frame_info_ptr this_frame,
++ void **this_prologue_cache,
++ struct frame_id *this_id)
++{
++ /* The frame is identified by its virtual frame pointer together
++ with the detected function start address.  */
++ struct sw64_heuristic_unwind_cache *cache
++ = sw64_heuristic_frame_unwind_cache (this_frame, this_prologue_cache, 0);
++
++ *this_id = frame_id_build (cache->vfp, cache->start_pc);
++}
++
++/* Retrieve the value of REGNUM in FRAME. Don't give up! */
++
++static struct value *
++sw64_heuristic_frame_prev_register (frame_info_ptr this_frame,
++ void **this_prologue_cache, int regnum)
++{
++ struct sw64_heuristic_unwind_cache *cache
++ = sw64_heuristic_frame_unwind_cache (this_frame, this_prologue_cache, 0);
++
++ /* The previous frame's PC lives in whatever register this function
++ used as its link register; redirect PC requests there.  */
++ int target_reg
++ = (regnum == SW64_PC_REGNUM) ? cache->return_reg : regnum;
++
++ return trad_frame_get_prev_register (this_frame, cache->saved_regs,
++ target_reg);
++}
++
++/* Prologue-analysis based unwinder, appended after the other unwinders
++   so it is only tried for frames no earlier sniffer claimed.  */
++
++static const struct frame_unwind sw64_heuristic_frame_unwind =
++{
++ "sw64 prologue",
++ NORMAL_FRAME,
++ default_frame_unwind_stop_reason,
++ sw64_heuristic_frame_this_id,
++ sw64_heuristic_frame_prev_register,
++ NULL,
++ default_frame_sniffer
++};
++
++/* frame_base handler: the virtual frame pointer from the heuristic
++   cache serves as the base address.  */
++
++static CORE_ADDR
++sw64_heuristic_frame_base_address (frame_info_ptr this_frame,
++ void **this_prologue_cache)
++{
++ return sw64_heuristic_frame_unwind_cache (this_frame,
++ this_prologue_cache, 0)->vfp;
++}
++
++/* Default frame base: base, locals and args all resolve to the same
++   virtual frame pointer address.  */
++
++static const struct frame_base sw64_heuristic_frame_base = {
++ &sw64_heuristic_frame_unwind,
++ sw64_heuristic_frame_base_address,
++ sw64_heuristic_frame_base_address,
++ sw64_heuristic_frame_base_address
++};
++
++/* Just like reinit_frame_cache, but with the right arguments to be
++ callable as an sfunc. Used by the "set heuristic-fence-post" command. */
++
++static void
++reinit_frame_cache_sfunc (const char *args, int from_tty,
++ struct cmd_list_element *c)
++{
++ /* The fence post affects backtraces, so drop any cached frames.  */
++ reinit_frame_cache ();
++}
++
++/* Helper routines for sw64*-nat.c files to move register sets to and
++ from core files. The UNIQUE pointer is allowed to be NULL, as most
++ targets don't supply this value in their core files. */
++
++void
++sw64_supply_int_regs (struct regcache *regcache, int regno,
++ const void *r0_r30, const void *pc, const void *unique)
++{
++ const gdb_byte *regs = (const gdb_byte *) r0_r30;
++
++ /* General registers r0..r30 are packed 8 bytes apart.  */
++ for (int i = 0; i < 31; ++i)
++ if (regno == -1 || regno == i)
++ regcache->raw_supply (i, regs + i * 8);
++
++ /* The zero register is not in the buffer; synthesize it.  */
++ if (regno == -1 || regno == SW64_ZERO_REGNUM)
++ {
++ const gdb_byte zero[8] = { 0 };
++
++ regcache->raw_supply (SW64_ZERO_REGNUM, zero);
++ }
++
++ if (regno == -1 || regno == SW64_PC_REGNUM)
++ regcache->raw_supply (SW64_PC_REGNUM, pc);
++
++ if (regno == -1 || regno == SW64_UNIQUE_REGNUM)
++ regcache->raw_supply (SW64_UNIQUE_REGNUM, unique);
++}
++
++void
++sw64_fill_int_regs (const struct regcache *regcache,
++ int regno, void *r0_r30, void *pc, void *unique)
++{
++ gdb_byte *regs = (gdb_byte *) r0_r30;
++
++ /* Collect general registers r0..r30, 8 bytes each.  */
++ for (int i = 0; i < 31; ++i)
++ if (regno == -1 || regno == i)
++ regcache->raw_collect (i, regs + i * 8);
++
++ if (regno == -1 || regno == SW64_PC_REGNUM)
++ regcache->raw_collect (SW64_PC_REGNUM, pc);
++
++ /* UNIQUE may legitimately be absent from the caller's layout.  */
++ if (unique != nullptr && (regno == -1 || regno == SW64_UNIQUE_REGNUM))
++ regcache->raw_collect (SW64_UNIQUE_REGNUM, unique);
++}
++
++void
++sw64_supply_fp_regs (struct regcache *regcache, int regno,
++ const void *f0_f30, const void *fpcr)
++{
++ const gdb_byte *regs = (const gdb_byte *) f0_f30;
++
++ /* Floating registers f0..f30 are packed 8 bytes apart.  */
++ for (int i = 0; i < 31; ++i)
++ if (regno == -1 || regno == SW64_FP0_REGNUM + i)
++ regcache->raw_supply (SW64_FP0_REGNUM + i, regs + i * 8);
++
++ if (regno == -1 || regno == SW64_FPCR_REGNUM)
++ regcache->raw_supply (SW64_FPCR_REGNUM, fpcr);
++}
++
++void
++sw64_fill_fp_regs (const struct regcache *regcache,
++ int regno, void *f0_f30, void *fpcr)
++{
++ gdb_byte *regs = (gdb_byte *) f0_f30;
++
++ /* Collect floating registers f0..f30, 8 bytes each.  */
++ for (int i = 0; i < 31; ++i)
++ if (regno == -1 || regno == SW64_FP0_REGNUM + i)
++ regcache->raw_collect (SW64_FP0_REGNUM + i, regs + i * 8);
++
++ if (regno == -1 || regno == SW64_FPCR_REGNUM)
++ regcache->raw_collect (SW64_FPCR_REGNUM, fpcr);
++}
++
++
++
++/* Return nonzero if the G_floating register value in REG is equal to
++ zero for FP control instructions. */
++
++static int
++fp_register_zero_p (LONGEST reg)
++{
++ /* All bits other than the sign bit must be clear, i.e. the value
++ is +0.0 or -0.0.  */
++ return (reg & ~((LONGEST) 1 << 63)) == 0;
++}
++
++/* Return the value of the sign bit for the G_floating register
++ value held in REG. */
++
++static int
++fp_register_sign_bit (LONGEST reg)
++{
++ /* Bit 63 set is exactly "negative" for a two's-complement LONGEST.  */
++ return reg < 0;
++}
++
++/* sw64_software_single_step() is called just before we want to resume
++ the inferior, if we want to single-step it but there is no hardware
++ or kernel single-step support (NetBSD on SW64, for example). We find
++ the target of the coming instruction and breakpoint it. */
++
++/* Return the address of the instruction that will execute after the
++   one at PC: decode jumps and branches and evaluate branch conditions
++   against the current register contents in REGCACHE.  Falls through
++   to PC + 4 for everything else.  */
++
++static CORE_ADDR
++sw64_next_pc (struct regcache *regcache, CORE_ADDR pc)
++{
++ struct gdbarch *gdbarch = regcache->arch ();
++ unsigned int insn;
++ unsigned int op;
++ int regno;
++ int offset;
++ LONGEST rav;
++
++ insn = sw64_read_insn (gdbarch, pc);
++
++ /* Opcode is top 6 bits. */
++ op = (insn >> 26) & 0x3f;
++
++#ifndef LHX20240710
++ switch (op) {
++ case 0x1: /* CALL */
++ case 0x2: /* RET */
++ case 0x3: /* JMP */
++ /* Target comes from RB<20:16>, low two bits cleared.  */
++ return (regcache_raw_get_unsigned (regcache, (insn >> 16) & 0x1f) & ~3);
++ case 0x1d: /* LBR */
++ /* Sign-extend the 26-bit displacement (in instruction units).  */
++ offset = (insn & 0x03ffffff);
++ if (offset & 0x02000000)
++ offset |= 0xfc000000;
++ offset *= SW64_INSN_SIZE;
++ return (pc + SW64_INSN_SIZE + offset);
++ case 0x4: /* BR */
++ case 0x5: /* BSR */
++ branch_taken:
++ /* Sign-extend the 21-bit displacement (in instruction units).  */
++ offset = (insn & 0x001fffff);
++ if (offset & 0x00100000)
++ offset |= 0xffe00000;
++ offset *= SW64_INSN_SIZE;
++ return (pc + SW64_INSN_SIZE + offset);
++ }
++
++ /* Need to determine if branch is taken; read RA. */
++ /* Conditional branches occupy opcodes 0x30..0x3f.  */
++ if ((op & 0x30) == 0x30) {
++ regno = (insn >> 21) & 0x1f;
++ switch (op)
++ {
++ case 0x38: /* FBEQ */
++ case 0x39: /* FBNE */
++ case 0x3a: /* FBLT */
++ case 0x3b: /* FBLE */
++ case 0x3c: /* FBGT */
++ case 0x3d: /* FBGE */
++ /* FP branches test a floating register; rebase RA.  */
++ regno += gdbarch_fp0_regnum (gdbarch);
++ }
++
++ rav = regcache_raw_get_signed (regcache, regno);
++
++ switch (op)
++ {
++ case 0x36: /* BLBC */
++ if ((rav & 1) == 0)
++ goto branch_taken;
++ break;
++ case 0x37: /* BLBS */
++ if (rav & 1)
++ goto branch_taken;
++ break;
++ case 0x30: /* BEQ */
++ if (rav == 0)
++ goto branch_taken;
++ break;
++ case 0x31: /* BNE */
++ if (rav != 0)
++ goto branch_taken;
++ break;
++ case 0x32: /* BLT */
++ if (rav < 0)
++ goto branch_taken;
++ break;
++ case 0x33: /* BLE */
++ if (rav <= 0)
++ goto branch_taken;
++ break;
++ case 0x34: /* BGT */
++ if (rav > 0)
++ goto branch_taken;
++ break;
++ case 0x35: /* BGE */
++ if (rav >= 0)
++ goto branch_taken;
++ break;
++
++ /* Floating point branches. */
++ case 0x38: /* FBEQ */
++ if (fp_register_zero_p (rav))
++ goto branch_taken;
++ break;
++ case 0x3d: /* FBGE */
++ if (fp_register_sign_bit (rav) == 0 || fp_register_zero_p (rav))
++ goto branch_taken;
++ break;
++ case 0x3c: /* FBGT */
++ if (fp_register_sign_bit (rav) == 0 && ! fp_register_zero_p (rav))
++ goto branch_taken;
++ break;
++ case 0x3b: /* FBLE */
++ if (fp_register_sign_bit (rav) == 1 || fp_register_zero_p (rav))
++ goto branch_taken;
++ break;
++ case 0x3a: /* FBLT */
++ if (fp_register_sign_bit (rav) == 1 && ! fp_register_zero_p (rav))
++ goto branch_taken;
++ break;
++ case 0x39: /* FBNE */
++ if (! fp_register_zero_p (rav))
++ goto branch_taken;
++ break;
++ }
++ }
++#endif
++
++ /* Not a branch or branch not taken; target PC is:
++ pc + 4 */
++ return (pc + SW64_INSN_SIZE);
++}
++
++/* Implement gdbarch_software_single_step: return the possible PCs at
++   which to place single-step breakpoints.
++   BUGFIX: the return/local types lost their template arguments when
++   this patch was generated ("std::vector" alone is ill-formed);
++   restore std::vector<CORE_ADDR>.  */
++std::vector<CORE_ADDR>
++sw64_software_single_step (struct regcache *regcache)
++{
++ struct gdbarch *gdbarch = regcache->arch ();
++
++ CORE_ADDR pc = regcache_read_pc (regcache);
++
++ /* An atomic sequence must not be stepped insn-by-insn; if PC starts
++ one, sw64_deal_with_atomic_sequence supplies its exit PCs.  */
++ std::vector<CORE_ADDR> next_pcs
++ = sw64_deal_with_atomic_sequence (gdbarch, pc);
++ if (!next_pcs.empty ())
++ return next_pcs;
++
++ CORE_ADDR next_pc = sw64_next_pc (regcache, pc);
++ return {next_pc};
++}
++
++
++/* Initialize the current architecture based on INFO. If possible, re-use an
++ architecture from ARCHES, which is a list of architectures already created
++ during this debugging session.
++
++ Called e.g. at program startup, when reading a core file, and when reading
++ a binary file. */
++
++static struct gdbarch *
++sw64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
++{
++ /* Find a candidate among extant architectures. */
++ arches = gdbarch_list_lookup_by_info (arches, &info);
++ if (arches != NULL)
++ return arches->gdbarch;
++
++ gdbarch *gdbarch
++ = gdbarch_alloc (&info, gdbarch_tdep_up (new sw64_gdbarch_tdep));
++ /* BUGFIX: gdbarch_tdep requires its explicit template argument in
++ GDB 14; the angle brackets were stripped when this patch was
++ generated. */
++ sw64_gdbarch_tdep *tdep = gdbarch_tdep<sw64_gdbarch_tdep> (gdbarch);
++
++ /* Lowest text address. This is used by heuristic_proc_start()
++ to decide when to stop looking. */
++ tdep->vm_min_address = (CORE_ADDR) 0x120000000LL;
++
++ /* Signal trampoline layout; the OS ABI hook may override these. */
++ tdep->dynamic_sigtramp_offset = NULL;
++ tdep->sigcontext_addr = NULL;
++ tdep->sc_pc_offset = 2 * 8;
++ tdep->sc_regs_offset = 4 * 8;
++ tdep->sc_fpregs_offset = tdep->sc_regs_offset + 32 * 8 + 8;
++
++ tdep->jb_pc = -1; /* longjmp support not enabled by default. */
++
++ tdep->return_in_memory = sw64_return_in_memory_always;
++
++#ifndef LHX20240710
++ tdep->sw64_vec_type = 0;
++#endif
++
++ /* Type sizes */
++ set_gdbarch_short_bit (gdbarch, 16);
++ set_gdbarch_int_bit (gdbarch, 32);
++ set_gdbarch_long_bit (gdbarch, 64);
++ set_gdbarch_long_long_bit (gdbarch, 64);
++ set_gdbarch_wchar_bit (gdbarch, 64);
++ set_gdbarch_wchar_signed (gdbarch, 0);
++ set_gdbarch_float_bit (gdbarch, 32);
++ set_gdbarch_double_bit (gdbarch, 64);
++ set_gdbarch_long_double_bit (gdbarch, 64);
++ set_gdbarch_ptr_bit (gdbarch, 64);
++
++ /* Register info */
++ set_gdbarch_num_regs (gdbarch, SW64_NUM_REGS);
++ set_gdbarch_sp_regnum (gdbarch, SW64_SP_REGNUM);
++ set_gdbarch_pc_regnum (gdbarch, SW64_PC_REGNUM);
++ set_gdbarch_fp0_regnum (gdbarch, SW64_FP0_REGNUM);
++
++ set_gdbarch_register_name (gdbarch, sw64_register_name);
++ set_gdbarch_register_type (gdbarch, sw64_register_type);
++
++ set_gdbarch_cannot_fetch_register (gdbarch, sw64_cannot_fetch_register);
++ set_gdbarch_cannot_store_register (gdbarch, sw64_cannot_store_register);
++
++ set_gdbarch_convert_register_p (gdbarch, sw64_convert_register_p);
++ set_gdbarch_register_to_value (gdbarch, sw64_register_to_value);
++ set_gdbarch_value_to_register (gdbarch, sw64_value_to_register);
++
++ set_gdbarch_register_reggroup_p (gdbarch, sw64_register_reggroup_p);
++
++ /* Prologue heuristics. */
++ set_gdbarch_skip_prologue (gdbarch, sw64_skip_prologue);
++
++#ifndef LHX20240710
++ /* Vector registers are exposed as pseudo registers.  (This block
++ used to be registered twice, verbatim; the redundant second copy
++ has been removed.) */
++ set_gdbarch_num_pseudo_regs (gdbarch, NVEC_REGS);
++ set_gdbarch_pseudo_register_read (gdbarch, sw64_vec_register_read);
++ set_gdbarch_pseudo_register_write (gdbarch, sw64_vec_register_write);
++ set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
++#endif
++
++ /* Call info. */
++
++ set_gdbarch_return_value (gdbarch, sw64_return_value);
++
++ /* Settings for calling functions in the inferior. */
++ set_gdbarch_push_dummy_call (gdbarch, sw64_push_dummy_call);
++
++ set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
++ set_gdbarch_skip_trampoline_code (gdbarch, find_solib_trampoline_target);
++
++#ifndef LHX20240710
++ set_gdbarch_skip_permanent_breakpoint (gdbarch, sw64_skip_permanent_breakpoint);
++#endif
++
++ set_gdbarch_breakpoint_kind_from_pc (gdbarch,
++ sw64_breakpoint::kind_from_pc);
++ set_gdbarch_sw_breakpoint_from_kind (gdbarch,
++ sw64_breakpoint::bp_from_kind);
++ set_gdbarch_decr_pc_after_break (gdbarch, SW64_INSN_SIZE);
++ set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
++
++ /* Handles single stepping of atomic sequences. */
++ set_gdbarch_software_single_step (gdbarch, sw64_software_single_step);
++
++ /* Hook in ABI-specific overrides, if they have been registered. */
++ gdbarch_init_osabi (info, gdbarch);
++
++ /* Now that we have tuned the configuration, set a few final things
++ based on what the OS ABI has told us. */
++
++ if (tdep->jb_pc >= 0)
++ set_gdbarch_get_longjmp_target (gdbarch, sw64_get_longjmp_target);
++
++ frame_unwind_append_unwinder (gdbarch, &sw64_sigtramp_frame_unwind);
++ frame_unwind_append_unwinder (gdbarch, &sw64_heuristic_frame_unwind);
++
++ frame_base_set_default (gdbarch, &sw64_heuristic_frame_base);
++
++ return gdbarch;
++}
++
++/* Hook DWARF CFI based unwinding into GDBARCH; called by OS ABI
++   initializers so debug-info unwinding is preferred when present.  */
++void
++sw64_dwarf2_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
++{
++ dwarf2_append_unwinders (gdbarch);
++ frame_base_append_sniffer (gdbarch, dwarf2_frame_base_sniffer);
++}
++
++void _initialize_sw64_tdep ();
++void
++_initialize_sw64_tdep ()
++{
++ /* Make the SW64 architecture known to GDB's gdbarch framework.  */
++ gdbarch_register (bfd_arch_sw64, sw64_gdbarch_init, NULL);
++
++ /* Let the user set the fence post for heuristic_proc_start. */
++
++ /* We really would like to have both "0" and "unlimited" work, but
++ command.c doesn't deal with that. So make it a var_zinteger
++ because the user can always use "999999" or some such for unlimited. */
++ /* We need to throw away the frame cache when we set this, since it
++ might change our ability to get backtraces. */
++ add_setshow_zinteger_cmd ("heuristic-fence-post", class_support,
++ &heuristic_fence_post, _("\
++Set the distance searched for the start of a function."), _("\
++Show the distance searched for the start of a function."), _("\
++If you are debugging a stripped executable, GDB needs to search through the\n\
++program for the start of a function. This command sets the distance of the\n\
++search. The only need to set it is when debugging a stripped executable."),
++ reinit_frame_cache_sfunc,
++ NULL, /* FIXME: i18n: The distance searched for
++ the start of a function is \"%d\". */
++ &setlist, &showlist);
++}
++
++#ifndef LHX20240716_record
++/* SW64 process record-replay related structures, defines etc. */
++
++/* Allocate REGS and copy LENGTH register numbers out of RECORD_BUF;
++   no-op when LENGTH is zero.
++   BUGFIX: "&REGS" had been corrupted to the (R) entity "(r)S" when
++   this patch was generated; restored to &REGS[0].  */
++#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
++ do \
++ { \
++ unsigned int reg_len = LENGTH; \
++ if (reg_len) \
++ { \
++ REGS = XNEWVEC (uint32_t, reg_len); \
++ memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
++ } \
++ } \
++ while (0)
++
++/* Allocate MEMS and copy LENGTH sw64_mem_r entries out of RECORD_BUF;
++   no-op when LENGTH is zero.  `&MEMS->len' is the address of the first
++   member, i.e. the start of the destination array.  */
++#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
++ do \
++ { \
++ unsigned int mem_len = LENGTH; \
++ if (mem_len) \
++ { \
++ MEMS = XNEWVEC (struct sw64_mem_r, mem_len); \
++ memcpy(&MEMS->len, &RECORD_BUF[0], \
++ sizeof(struct sw64_mem_r) * LENGTH); \
++ } \
++ } \
++ while (0)
++
++/* SW64 record/replay structures and enumerations. */
++
++/* One recorded memory write: LEN bytes at ADDR.  */
++
++struct sw64_mem_r
++{
++ uint64_t len; /* Record length. */
++ uint64_t addr; /* Memory address. */
++};
++
++/* Result codes returned by the per-format record helpers.  */
++
++enum sw64_record_result
++{
++ SW64_RECORD_SUCCESS,
++ SW64_RECORD_UNSUPPORTED,
++ SW64_RECORD_UNKNOWN
++};
++
++/* Working state while decoding a single insn for process record.  */
++
++struct sw64_insn_decode_record
++{
++ struct gdbarch *gdbarch;
++ struct regcache *regcache;
++ CORE_ADDR this_addr; /* Address of insn to be recorded. */
++ uint32_t sw64_insn; /* Insn to be recorded. */
++ uint32_t mem_rec_count; /* Count of memory records. */
++ uint32_t reg_rec_count; /* Count of register records. */
++ uint32_t *sw64_regs; /* Registers to be recorded. */
++ struct sw64_mem_r *sw64_mems; /* Memory locations to be recorded. */
++};
++
++/* Record the destination register of a calculate-format insn.  The
++   destination is normally RC in bits <4:0>; for the FP/SIMD opcode
++   groups (0x18, 0x1a, 0x1b) it is biased by 32 into the FP bank, and
++   a few 0x18 function codes write FPCR or take the destination from
++   bits <25:21> instead.  */
++
++static unsigned int
++sw64_record_data_proc_reg (sw64_insn_decode_record *sw64_insn_r)
++{
++ uint8_t insn_bits21_25 = (rigg (sw64_insn_r->sw64_insn, 21) & 0x1F);
++ uint8_t ins_bits5_12 = (rigg (sw64_insn_r->sw64_insn, 5) & 0xff);
++ uint32_t ins_bit31_26 = rigg (sw64_insn_r->sw64_insn, 26);
++ uint8_t ins_bits0_4 = (sw64_insn_r->sw64_insn & 0x1F);
++ uint32_t record_buf[4];
++
++ /* Default: integer destination RC<4:0>.  */
++ record_buf[0] = ins_bits0_4;
++
++ if (ins_bit31_26 == 0x18)
++ {
++ if (ins_bits5_12 == 0x50)
++ record_buf[0] = insn_bits21_25 + 32; /* dest in bits <25:21>.  */
++ else if (ins_bits5_12 == 0x51
++ || (0x54 <= ins_bits5_12 && ins_bits5_12 <= 0x57))
++ record_buf[0] = SW64_FPCR_REGNUM; /* FP control writes.  */
++ else
++ record_buf[0] = ins_bits0_4 + 32;
++ }
++ if (0x14 <= ins_bit31_26 && ins_bit31_26 <= 0x17)
++ record_buf[0] = ins_bits0_4;
++ if (ins_bit31_26 == 0x1a)
++ {
++ if (ins_bits5_12 == 0x2 || ins_bits5_12 == 0x22
++ || ins_bits5_12 == 0x18 || ins_bits5_12 == 0x19)
++ record_buf[0] = ins_bits0_4 + 32;
++ else
++ record_buf[0] = ins_bits0_4;
++ }
++ if (ins_bit31_26 == 0x1b)
++ {
++ if (ins_bits5_12 == 0x22 || ins_bits5_12 == 0x23)
++ record_buf[0] = ins_bits0_4 + 32;
++ else
++ /* BUGFIX: this assignment previously ran unconditionally,
++ clobbering the FP destination chosen just above (compare
++ the parallel 0x1a case, which has the `else').  */
++ record_buf[0] = ins_bits0_4;
++ }
++
++ sw64_insn_r->reg_rec_count = 1;
++ REG_ALLOC (sw64_insn_r->sw64_regs, sw64_insn_r->reg_rec_count, record_buf);
++ return SW64_RECORD_SUCCESS;
++}
++
++/* Record a system-call-format insn (opcode 0x00).  The encoding
++   0x200009e clobbers all 32 integer registers; any other sys_call is
++   delegated to the OS-specific syscall recorder, if one was
++   registered.  */
++
++static unsigned int
++sw64_record_data_proc_imm (sw64_insn_decode_record *sw64_insn_r)
++{
++ /* BUGFIX: gdbarch_tdep requires its explicit template argument in
++ GDB 14; the angle brackets were stripped when this patch was
++ generated.  */
++ sw64_gdbarch_tdep *tdep
++ = gdbarch_tdep<sw64_gdbarch_tdep> (sw64_insn_r->gdbarch);
++
++ if (sw64_insn_r->sw64_insn == 0x200009e)
++ {
++ for (int i = 0; i < 32; i++)
++ record_full_arch_list_add_reg (sw64_insn_r->regcache, i);
++ return SW64_RECORD_SUCCESS;
++ }
++
++ if (tdep->sw64_syscall_record != NULL)
++ return tdep->sw64_syscall_record (sw64_insn_r->regcache);
++
++ return SW64_RECORD_UNSUPPORTED;
++}
++
++/* Record a branch-format insn.  Conditional branches (opcodes
++   0x30..0x3D) only modify the PC; other branch-format insns (br/bsr)
++   also write the link register in bits <25:21>.  */
++
++static unsigned int
++sw64_record_branch_except_sys (sw64_insn_decode_record *sw64_insn_r)
++{
++ uint32_t opcode = rigg (sw64_insn_r->sw64_insn, 26);
++ uint8_t link_reg = (rigg (sw64_insn_r->sw64_insn, 21) & 0x1F);
++ uint32_t record_buf[4];
++
++ if (0x30 <= opcode && opcode <= 0x3D)
++ record_buf[sw64_insn_r->reg_rec_count++] = SW64_PC_REGNUM;
++ else
++ record_buf[sw64_insn_r->reg_rec_count++] = link_reg;
++
++ REG_ALLOC (sw64_insn_r->sw64_regs, sw64_insn_r->reg_rec_count,
++ record_buf);
++ return SW64_RECORD_SUCCESS;
++}
++
++/* Record the side effects of a memory-format insn (loads, stores,
++   jumps, misc and SIMD memory ops all share this format).  Register
++   destinations are collected in record_buf, memory writes (size,
++   address pairs) in record_buf_mem; both are flushed through
++   REG_ALLOC/MEM_ALLOC at the `log' label.  */
++
++static unsigned int
++sw64_record_load_store (sw64_insn_decode_record *sw64_insn_r)
++{
++ uint8_t bit12;
++ bit12=bit(sw64_insn_r->sw64_insn, 12);
++ uint8_t insn_bits12_15;
++ insn_bits12_15=(rigg(sw64_insn_r->sw64_insn, 12) & 0xf);
++ uint32_t insn_bits0_15;
++ insn_bits0_15 = (sw64_insn_r->sw64_insn & 0xffff);
++ uint8_t insn_bits21_25;
++ uint8_t insn_bits16_20;
++ uint8_t insn_bits8_15;
++ insn_bits8_15 = (rigg(sw64_insn_r->sw64_insn, 8) & 0xff);
++ uint32_t ins_bit31_26;
++ ins_bit31_26 = rigg (sw64_insn_r->sw64_insn, 26);
++ insn_bits16_20 = (rigg(sw64_insn_r->sw64_insn, 16) & 0x1f);
++ /* BUGFIX: this was declared uint8_t, making the comparison against
++ 0x1040 below always false; it holds a 16-bit field.  */
++ uint32_t insn_bit0_16;
++ insn_bit0_16=(sw64_insn_r->sw64_insn & 0xffff);
++ uint32_t record_buf[8];
++ uint64_t record_buf_mem[8];
++ CORE_ADDR address;
++ int save_size;
++ insn_bits21_25 = (rigg (sw64_insn_r->sw64_insn, 21) & 0x1F);
++ int Byte4_len;
++ /* Sign-extend the 16-bit and 12-bit displacement fields.  */
++ int disp15,disp11;
++ disp15 = sw64_insn_r->sw64_insn & 0xffff;
++ disp11 = sw64_insn_r->sw64_insn & 0xfff;
++ if(sw64_insn_r->sw64_insn & 0x8000)
++ {
++ disp15 |= 0xffff0000;
++ }
++ if(sw64_insn_r->sw64_insn & 0x800)
++ {
++ disp11 |= 0xfffff000;
++ }
++ /*load imme , transfer*/
++ if ((0x3E <= ins_bit31_26 && ins_bit31_26<= 0x3F) || ins_bit31_26 ==0x07 || ins_bit31_26 == 0x25)
++ {
++ /* Loads only modify the destination register RA<25:21>.  */
++ record_buf[0] = insn_bits21_25;
++ sw64_insn_r->reg_rec_count = 1;
++ }
++ else if((0x09 <= ins_bit31_26 && ins_bit31_26<= 0x0F) || ins_bit31_26 == 0x1C)//simd save and load
++ {
++ if(0x09 <= ins_bit31_26 && ins_bit31_26<= 0x0D)
++ {
++ record_buf[0] = insn_bits21_25;
++ sw64_insn_r->reg_rec_count = 1;
++ }
++ else if(0x0E == ins_bit31_26 || ins_bit31_26 == 0x0F || ins_bit31_26 == 0x1C)
++ {
++ if(ins_bit31_26 == 0x1C && insn_bits12_15 == 0xe)
++ {
++ regcache_raw_read_unsigned (sw64_insn_r->regcache, insn_bits16_20,
++ &address);
++ address+=disp11;
++ record_buf_mem[0] = 32;
++ record_buf_mem[1] = address;
++ sw64_insn_r->mem_rec_count = 1;
++ }
++ regcache_raw_read_unsigned (sw64_insn_r->regcache, insn_bits16_20,
++ &address);
++ /* BUGFIX: the original condition used &&, which can never be
++ true, and then unconditionally added the raw 12-bit field.
++ Opcodes 0x0e/0x0f use the sign-extended 16-bit displacement;
++ the 0x1C group uses the low 12 bits.  */
++ if(ins_bit31_26 == 0xe || ins_bit31_26 == 0xf)
++ address+=disp15;
++ else
++ address+=(sw64_insn_r->sw64_insn & 0xfff);
++ if(ins_bit31_26 == 0x1C)
++ {
++ /* Align the address and remember how many 4-byte lanes the
++ alignment skipped.  */
++ switch(insn_bits12_15)
++ {
++ case 0x9:
++ case 0x8:
++ case 0x1:
++ Byte4_len = ((address & 0x1f) >> 2);
++ address=address & 0xffffffffffffffe0;
++ break;
++ case 0xb:
++ case 0xa:
++ case 0x3:
++ Byte4_len = ((address & 0xf) >> 2);
++ address=address & 0xfffffffffffffff0;
++ break;
++ case 0xf:
++ save_size=32;
++ break;
++ case 0xd:
++ case 0xc:
++ case 0x5:
++ Byte4_len = ((address & 0x1f) >> 3);
++ address=address & 0xffffffffffffffe0;
++ break;
++ default:
++ goto simdsave;
++ break;
++ }
++ }
++ switch(ins_bit31_26)
++ {
++ case 0x0e:
++ save_size=16;
++ break;
++ case 0xf:
++ save_size=32;
++ break;
++ case 0x1C:
++ switch(insn_bits12_15)
++ {
++ case 0x01:
++ save_size=32;
++ break;
++ case 0x03:
++ /* fallthrough */
++ case 0x05:
++ save_size=16;
++ break;
++ case 0x8:
++ save_size=32-4*Byte4_len;
++ address+=Byte4_len*4;
++ break;
++ case 0xf:
++ save_size=32;
++ break;
++ case 0xc:
++ /* fallthrough */
++ case 0xA:
++ save_size=4*(4-Byte4_len);
++ address+=Byte4_len*4;
++ break;
++ case 0xB:
++ /* fallthrough */
++ case 0x9:
++ /* fallthrough */
++ case 0xd:
++ if(Byte4_len == 0)
++ {
++ return SW64_RECORD_UNSUPPORTED;
++ }
++ else
++ {
++ save_size = Byte4_len*4;
++ }
++ break;
++ default:
++ goto simdsave;
++ break;
++ }
++ /* BUGFIX: this used to fall through into the default case
++ and return UNSUPPORTED, discarding the memory record that
++ was just computed for the 0x1C group.  */
++ break;
++ default:
++ return SW64_RECORD_UNSUPPORTED;
++ }
++ record_buf_mem[0] = save_size;
++ record_buf_mem[1] = address;
++ sw64_insn_r->mem_rec_count = 1;
++ }
++ }
++ else if(0x01 <= ins_bit31_26 && ins_bit31_26<= 0x03)
++ {
++ /* Jumps write the link register RA<25:21>.  */
++ record_buf[sw64_insn_r->reg_rec_count++] = insn_bits21_25;
++ goto log;
++ }
++ else if(ins_bit31_26 == 0x06 || ins_bit31_26 == 0x08 || ins_bit31_26 == 0x2D)//misc
++ {
++ if(ins_bit31_26 == 0x06)
++ {
++ switch(insn_bits0_15)
++ {
++ case 0x0:
++ case 0x1:
++ case 0x80:
++ return SW64_RECORD_UNSUPPORTED;
++ case 0x20:
++ case 0x40:
++ case 0x1040:
++ goto simdsave;
++ default:
++ return SW64_RECORD_UNSUPPORTED;
++ }
++ }
++ /* NOTE(review): insn_bits12_15 is a 4-bit field, so the 0x1000 and
++ 0x1020 comparisons below can never match -- the intended field
++ was presumably insn_bits0_15; left as-is pending confirmation
++ (every opcode 0x06 path above already returned or jumped).  */
++ if(ins_bit31_26 == 0x06 && (insn_bits12_15 == 0x1000 || insn_bits12_15 == 0x1020))//lock
++ return SW64_RECORD_UNSUPPORTED;
++
++ else if(ins_bit31_26 == 0x08 && (insn_bits12_15 == 0x0 || insn_bits12_15 == 0x1 || insn_bits12_15 == 0x8 || insn_bits12_15 == 0x9))//lock
++ {
++ return SW64_RECORD_UNSUPPORTED;
++ }
++ else if(ins_bit31_26 == 0x06 && (insn_bits8_15 == 0xff || insn_bits8_15 == 0xfe))
++ {
++ return SW64_RECORD_UNSUPPORTED;
++ }
++ else if(ins_bit31_26 == 0x07 || ins_bit31_26 == 0x25 || ins_bit31_26 == 0x2D)
++ return SW64_RECORD_UNSUPPORTED;//PRI__
++ else if(ins_bit31_26 == 0x08 && (0xA <= insn_bits12_15 && insn_bits12_15 <=0xC))//no catch
++ {
++ if(insn_bits12_15 == 0xC)
++ record_buf[0] = insn_bits21_25+32;
++ else
++ record_buf[0] = insn_bits21_25;
++ sw64_insn_r->reg_rec_count = 1;
++ }
++ else if(insn_bit0_16 == 0x20 || insn_bit0_16 == 0x40 || insn_bit0_16 == 0x1040)
++ {
++ record_buf[0] = insn_bits21_25;
++ sw64_insn_r->reg_rec_count = 1;
++ }
++ else if (ins_bit31_26 == 0x08 && ((0x2 <=insn_bits12_15 && insn_bits12_15 <= 0x7) || (0xd <=insn_bits12_15 && insn_bits12_15 <= 0xf)))
++ {
++ /* NOTE(review): the switch below is immediately overridden by
++ the bit12 test that follows; kept verbatim pending
++ confirmation of which selector is intended.  */
++ switch(insn_bits12_15)
++ {
++ case 0x2:
++ case 0x4:
++ case 0x6:
++ case 0xd:
++ save_size=4;
++ break;
++ case 0x3:
++ case 0x5:
++ case 0x7:
++ case 0xe:
++ case 0xf:
++ save_size=8;
++ break;
++ default:
++ return SW64_RECORD_UNSUPPORTED;
++ }
++ if(bit12 == 0)
++ {
++ save_size=4;
++ }
++ else
++ {
++ save_size=8;
++ }
++ regcache_raw_read_unsigned (sw64_insn_r->regcache, insn_bits16_20,
++ &address);
++ address+=disp11;
++ record_buf_mem[0] = save_size;
++ record_buf_mem[1] = address;
++ sw64_insn_r->mem_rec_count = 1;
++ }
++ else
++ return SW64_RECORD_UNSUPPORTED;
++ }
++ else if(ins_bit31_26 == 0x26 || ins_bit31_26 == 0x27)//float save
++ {
++ /* FP loads write the FP bank: destination + 32.  */
++ record_buf[0] = insn_bits21_25+32;
++ sw64_insn_r->reg_rec_count = 1;
++ goto log;
++ }
++ else
++ {
++ if (record_debug)
++ debug_printf ("Process record: load/store exclusive\n");
++
++ if (0x20 <= ins_bit31_26 && ins_bit31_26<= 0x24)
++ {
++ record_buf[0] = insn_bits21_25;
++ sw64_insn_r->reg_rec_count = 1;
++ }
++ else
++ {
++ switch(ins_bit31_26)
++ {
++ case 0x28:
++ save_size=1;
++ break;
++ case 0x29:
++ save_size=2;
++ break;
++ case 0x2A:
++ case 0x2E:
++ save_size=4;
++ break;
++ case 0x2B:
++ case 0x2F:
++ case 0x24:
++ save_size=8;
++ break;
++ default:
++ return SW64_RECORD_UNSUPPORTED;
++ }
++ regcache_raw_read_unsigned (sw64_insn_r->regcache, insn_bits16_20,
++ &address);
++ address+=disp15;
++ switch(ins_bit31_26)
++ {
++ case 0x24:
++ address&=0xfffffffffffffff8;
++ }
++ record_buf_mem[0] = save_size;
++ record_buf_mem[1] = address;
++ sw64_insn_r->mem_rec_count = 1;
++ goto log;
++ }
++ }
++simdsave:
++ record_buf[0] = insn_bits21_25;
++ sw64_insn_r->reg_rec_count = 1;
++log:
++ MEM_ALLOC (sw64_insn_r->sw64_mems, sw64_insn_r->mem_rec_count,
++ record_buf_mem);
++ REG_ALLOC (sw64_insn_r->sw64_regs, sw64_insn_r->reg_rec_count,
++ record_buf);
++ return SW64_RECORD_SUCCESS;
++}
++
++static unsigned int
++sw64_record_data_proc_simd_fp (sw64_insn_decode_record *sw64_insn_r)
++{
++ uint8_t ins_bits0_4;
++ ins_bits0_4 = (sw64_insn_r->sw64_insn & 0x1F);
++ uint32_t record_buf[4];
++ record_buf[0] = ins_bits0_4;
++ sw64_insn_r->reg_rec_count = 1;
++ REG_ALLOC (sw64_insn_r->sw64_regs, sw64_insn_r->reg_rec_count,record_buf);
++ return SW64_RECORD_SUCCESS;
++}
++
++/* Decodes insns type and invokes its record handler. */
++static unsigned int
++sw64_record_decode_insn_handler (sw64_insn_decode_record *sw64_insn_r)
++{
++
++
++ uint32_t ins_bit31_26;
++ ins_bit31_26 = rigg (sw64_insn_r->sw64_insn, 26);
++ /* five different instruction formats */
++/* system format. */
++ if (ins_bit31_26 == 0)
++ return sw64_record_data_proc_imm (sw64_insn_r);
++
++ /* transfer format */
++ if (ins_bit31_26 == 0x04 || ins_bit31_26 == 0x05 || (0x30 <= ins_bit31_26 && ins_bit31_26<= 0x3D))
++ return sw64_record_branch_except_sys (sw64_insn_r);
++
++ /* Load and store format. */
++ if ((0x01 <= ins_bit31_26 && ins_bit31_26<= 0x03)/*jump*/|| (0x28 <= ins_bit31_26 && ins_bit31_26<= 0x2C)//save and load
++ || (0x20 <= ins_bit31_26 && ins_bit31_26<= 0x24)
++ || (ins_bit31_26 == 0x25)//PRI_LD
++ || (0x26 <= ins_bit31_26 && ins_bit31_26<= 0x27)
++ || (0x2D == ins_bit31_26)//PRI_ST
++ || (0x2E <= ins_bit31_26 && ins_bit31_26<= 0x2F)
++ || (0x3E <= ins_bit31_26 && ins_bit31_26<= 0x3F)//load immediate
++ || (ins_bit31_26 == 0x06)//PRI_RWCSR
++ || (ins_bit31_26 == 0x07)//PRI_RET:W
++ || (ins_bit31_26 == 0x08)//misc
++ || (0x09 <= ins_bit31_26 && ins_bit31_26<= 0x0F)//simd load and save
++ || (ins_bit31_26 == 0x1c))
++ return sw64_record_load_store (sw64_insn_r);
++
++ /* simple calculate format */
++ if (ins_bit31_26 == 0x10 || ins_bit31_26 == 0x12//integer calculate
++ || ins_bit31_26 == 0x18//float calculate
++ || ins_bit31_26 == 0x1A//integer or float simd calculate
++ || (0x14 <= ins_bit31_26 && ins_bit31_26<= 0x17))//reconfig
++ return sw64_record_data_proc_reg (sw64_insn_r);
++
++ /* combination calculate format */
++ if (ins_bit31_26 == 0x1B || ins_bit31_26 == 0x19 || ins_bit31_26 == 0x11 || ins_bit31_26 == 0x13)
++ return sw64_record_data_proc_simd_fp (sw64_insn_r);
++
++ return SW64_RECORD_UNSUPPORTED;
++}
++
++static void
++deallocate_reg_mem (sw64_insn_decode_record *record)
++{
++ xfree (record->sw64_regs);
++ xfree (record->sw64_mems);
++}
++
++int
++sw64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
++ CORE_ADDR insn_addr)
++{
++ uint32_t rec_no = 0;
++ uint8_t insn_size = 4;
++ uint32_t ret = 0;
++ gdb_byte buf[insn_size];
++ sw64_insn_decode_record sw64_record;
++
++ memset (&buf[0], 0, insn_size);
++ memset (&sw64_record, 0, sizeof (sw64_insn_decode_record));
++ target_read_memory (insn_addr, &buf[0], insn_size);
++ sw64_record.sw64_insn
++ = (uint32_t) extract_unsigned_integer (&buf[0],
++ insn_size,
++ gdbarch_byte_order (gdbarch));
++
++ if (record_debug > 1)
++ gdb_printf (gdb_stderr, "Process record: sw64_process_record "
++ "addr = %s\n",
++ paddress (gdbarch, insn_addr));
++
++ sw64_record.regcache = regcache;
++ sw64_record.this_addr = insn_addr;
++ sw64_record.gdbarch = gdbarch;
++
++ ret = sw64_record_decode_insn_handler (&sw64_record);
++ if (ret == SW64_RECORD_UNSUPPORTED)
++ {
++ gdb_printf (gdb_stderr, _("Process record does not support instruction "
++ "0x%0x at address %s.\n"),
++ sw64_record.sw64_insn,
++ paddress (gdbarch, insn_addr));
++ ret = -1;
++ }
++
++ if (0 == ret)
++ {
++ /* Record registers. */
++ record_full_arch_list_add_reg (sw64_record.regcache,
++ SW64_PC_REGNUM);
++ /* Always record register CPSR. */
++// record_full_arch_list_add_reg (sw64_record.regcache,
++// SW_CSR_REGNUM);
++ if (sw64_record.sw64_regs)
++ for (rec_no = 0; rec_no < sw64_record.reg_rec_count; rec_no++)
++ if (record_full_arch_list_add_reg (sw64_record.regcache,
++ sw64_record.sw64_regs[rec_no]))
++ ret = -1;
++
++ /* Record memories. */
++ if (sw64_record.sw64_mems)
++ for (rec_no = 0; rec_no < sw64_record.mem_rec_count; rec_no++)
++ if (record_full_arch_list_add_mem
++ ((CORE_ADDR)sw64_record.sw64_mems[rec_no].addr,
++ sw64_record.sw64_mems[rec_no].len))
++ ret = -1;
++
++ if (record_full_arch_list_add_end ())
++ ret = -1;
++ }
++
++ deallocate_reg_mem (&sw64_record);
++ return ret;
++}
++#endif
+diff -Naur gdb-14.1-after-patch/gdb/sw64-tdep.h gdb-14.1-sw64/gdb/sw64-tdep.h
+--- gdb-14.1-after-patch/gdb/sw64-tdep.h 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/sw64-tdep.h 2025-03-03 10:59:13.210000000 +0800
+@@ -0,0 +1,153 @@
++/* Common target dependent code for GDB on SW64 systems.
++ Copyright (C) 1993-2023 Free Software Foundation, Inc.
++
++ This file is part of GDB.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program. If not, see . */
++
++#ifndef SW64_TDEP_H
++#define SW64_TDEP_H
++
++#include "gdbarch.h"
++
++struct regcache;
++
++/* Say how long (ordinary) registers are. This is a piece of bogosity
++ used in push_word and a few other places; register_size() is the
++ real way to know how big a register is. */
++#define SW64_REGISTER_SIZE 8
++
++/* Number of machine registers. */
++#define SW64_NUM_REGS 167
++
++/* Register numbers of various important registers. Note that most of
++ these values are "real" register numbers, and correspond to the
++ general registers of the machine. */
++
++#define SW64_V0_REGNUM 0 /* Function integer return value */
++#define SW64_T7_REGNUM 8 /* Return address register for OSF/1 __add* */
++#define SW64_S0_REGNUM 9 /* First saved register */
++#define SW64_GCC_FP_REGNUM 15 /* Used by gcc as frame register */
++#define SW64_A0_REGNUM 16 /* Loc of first arg during a subr call */
++#define SW64_T9_REGNUM 23 /* Return address register for OSF/1 __div* */
++#define SW64_RA_REGNUM 26 /* Contains return address value */
++#define SW64_T12_REGNUM 27 /* Contains start addr of current proc */
++#define SW64_GP_REGNUM 29 /* Contains the global pointer */
++#define SW64_SP_REGNUM 30 /* Contains address of top of stack */
++#define SW64_ZERO_REGNUM 31 /* Read-only register, always 0 */
++#define SW64_FP0_REGNUM 32 /* Floating point register 0 */
++#define SW64_FPA0_REGNUM 48 /* First float arg during a subr call */
++#define SW64_FPCR_REGNUM 63 /* Floating point control register */
++#define SW64_PC_REGNUM 64 /* Contains program counter */
++#define SW64_UNIQUE_REGNUM 66 /* PAL_rduniq value */
++
++#ifndef LHX20240716_record
++#define SW64_CSR_REGNUM 66 /* Used for process-record */
++#endif
++
++#ifndef LHX20240710
++#define SW64_V0F1_REGNUM 67
++#define SW64_V0F2_REGNUM 99
++#define SW64_V0F3_REGNUM 131
++#define SW64_VEC0_REGNUM 167 /* First vector register V0 */
++
++#define REG_BASE 0
++#define NGP_REGS 32
++#define NFP_REGS 32
++#define NVEC_REGS 32
++#define GPR_BASE REG_BASE
++#define FPR_BASE (GPR_BASE+NGP_REGS)
++#endif
++
++/* Instruction size. */
++#define SW64_INSN_SIZE 4
++
++/* The sw64 has two different virtual pointers for arguments and locals.
++
++ The virtual argument pointer is pointing to the bottom of the argument
++ transfer area, which is located immediately below the virtual frame
++ pointer. Its size is fixed for the native compiler, it is either zero
++ (for the no arguments case) or large enough to hold all argument registers.
++ gcc uses a variable sized argument transfer area. As it has
++ to stay compatible with the native debugging tools it has to use the same
++ virtual argument pointer and adjust the argument offsets accordingly.
++
++ The virtual local pointer is localoff bytes below the virtual frame
++ pointer, the value of localoff is obtained from the PDR. */
++#define SW64_NUM_ARG_REGS 6
++
++/* Target-dependent structure in gdbarch. */
++struct sw64_gdbarch_tdep : gdbarch_tdep_base
++{
++ CORE_ADDR vm_min_address = 0; /* Used by sw64_heuristic_proc_start. */
++
++ /* If PC is inside a dynamically-generated signal trampoline function
++ (i.e. one copied onto the user stack at run-time), return how many
++ bytes PC is beyond the start of that function. Otherwise, return -1. */
++ LONGEST (*dynamic_sigtramp_offset) (struct gdbarch *, CORE_ADDR) = nullptr;
++
++ /* Translate a signal handler stack base address into the address of
++ the sigcontext structure for that signal handler. */
++ CORE_ADDR (*sigcontext_addr) (frame_info_ptr) = nullptr;
++
++ /* Does the PC fall in a signal trampoline. */
++ /* NOTE: cagney/2004-04-30: Do not copy/clone this code. Instead
++ look at tramp-frame.h and other simpler per-architecture
++ sigtramp unwinders. */
++ int (*pc_in_sigtramp) (struct gdbarch *gdbarch, CORE_ADDR pc,
++ const char *name) = nullptr;
++
++ /* If TYPE will be returned in memory, return true. */
++ int (*return_in_memory) (struct type *type) = nullptr;
++
++ /* Offset of registers in `struct sigcontext'. */
++ int sc_pc_offset = 0;
++ int sc_regs_offset = 0;
++ int sc_fpregs_offset = 0;
++
++ int jb_pc = 0; /* Offset to PC value in jump buffer.
++ If htis is negative, longjmp support
++ will be disabled. */
++ size_t jb_elt_size = 0; /* And the size of each entry in the buf. */
++
++ /* construct vector register. */
++ struct type *sw64_vec_type;
++
++#ifndef LHX20240716_record
++ /* syscall record. */
++ int (*sw64_syscall_record) (struct regcache *regcache);
++#endif
++};
++
++extern unsigned int sw64_read_insn (struct gdbarch *gdbarch, CORE_ADDR pc);
++extern std::vector sw64_software_single_step
++ (struct regcache *regcache);
++extern CORE_ADDR sw64_after_prologue (CORE_ADDR pc);
++
++extern void sw64_mdebug_init_abi (struct gdbarch_info, struct gdbarch *);
++extern void sw64_dwarf2_init_abi (struct gdbarch_info, struct gdbarch *);
++
++extern void sw64_supply_int_regs (struct regcache *, int, const void *,
++ const void *, const void *);
++extern void sw64_fill_int_regs (const struct regcache *, int,
++ void *, void *, void *);
++extern void sw64_supply_fp_regs (struct regcache *, int,
++ const void *, const void *);
++extern void sw64_fill_fp_regs (const struct regcache *,
++ int, void *, void *);
++#ifndef LHX20240716_record
++extern int sw64_process_record (struct gdbarch *gdbarch,
++ struct regcache *regcache, CORE_ADDR addr);
++#endif
++#endif /* SW64_TDEP_H */
+diff -Naur gdb-14.1-after-patch/gdb/syscalls/sw64-linux.xml gdb-14.1-sw64/gdb/syscalls/sw64-linux.xml
+--- gdb-14.1-after-patch/gdb/syscalls/sw64-linux.xml 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/syscalls/sw64-linux.xml 2025-03-03 10:59:13.210000000 +0800
+@@ -0,0 +1,476 @@
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
+diff -Naur gdb-14.1-after-patch/gdb/syscalls/sw64-linux.xml.in gdb-14.1-sw64/gdb/syscalls/sw64-linux.xml.in
+--- gdb-14.1-after-patch/gdb/syscalls/sw64-linux.xml.in 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdb/syscalls/sw64-linux.xml.in 2025-03-03 10:59:13.210000000 +0800
+@@ -0,0 +1,479 @@
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
diff --git a/gdb-14.1-add-support-for-SW64-003.patch b/gdb-14.1-add-support-for-SW64-003.patch
new file mode 100644
index 0000000000000000000000000000000000000000..10677de907a0ed8d8e53ad32a768bfb00e5d230c
--- /dev/null
+++ b/gdb-14.1-add-support-for-SW64-003.patch
@@ -0,0 +1,1420 @@
+diff -Naur gdb-14.1-after-patch/opcodes/configure gdb-14.1-sw64/opcodes/configure
+--- gdb-14.1-after-patch/opcodes/configure 2023-12-03 13:23:54.000000000 +0800
++++ gdb-14.1-sw64/opcodes/configure 2025-03-03 10:59:13.830000000 +0800
+@@ -12548,6 +12548,7 @@
+ archdefs="$archdefs -DARCH_$ad"
+ case "$arch" in
+ bfd_aarch64_arch) ta="$ta aarch64-asm.lo aarch64-dis.lo aarch64-opc.lo aarch64-asm-2.lo aarch64-dis-2.lo aarch64-opc-2.lo" ;;
++ bfd_sw64_arch) ta="$ta sw64-dis.lo sw64-opc.lo" ;;
+ bfd_alpha_arch) ta="$ta alpha-dis.lo alpha-opc.lo" ;;
+ bfd_amdgcn_arch) ;;
+ bfd_arc_arch) ta="$ta arc-dis.lo arc-opc.lo arc-ext.lo" ;;
+diff -Naur gdb-14.1-after-patch/opcodes/configure.ac gdb-14.1-sw64/opcodes/configure.ac
+--- gdb-14.1-after-patch/opcodes/configure.ac 2023-12-03 13:23:54.000000000 +0800
++++ gdb-14.1-sw64/opcodes/configure.ac 2025-03-03 10:59:13.830000000 +0800
+@@ -265,6 +265,7 @@
+ archdefs="$archdefs -DARCH_$ad"
+ case "$arch" in
+ bfd_aarch64_arch) ta="$ta aarch64-asm.lo aarch64-dis.lo aarch64-opc.lo aarch64-asm-2.lo aarch64-dis-2.lo aarch64-opc-2.lo" ;;
++ bfd_sw64_arch) ta="$ta sw64-dis.lo sw64-opc.lo" ;;
+ bfd_alpha_arch) ta="$ta alpha-dis.lo alpha-opc.lo" ;;
+ bfd_amdgcn_arch) ;;
+ bfd_arc_arch) ta="$ta arc-dis.lo arc-opc.lo arc-ext.lo" ;;
+diff -Naur gdb-14.1-after-patch/opcodes/disassemble.c gdb-14.1-sw64/opcodes/disassemble.c
+--- gdb-14.1-after-patch/opcodes/disassemble.c 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/opcodes/disassemble.c 2025-03-03 10:59:13.840000000 +0800
+@@ -26,6 +26,7 @@
+ #ifdef ARCH_all
+ #ifdef BFD64
+ #define ARCH_aarch64
++#define ARCH_sw64
+ #define ARCH_alpha
+ #define ARCH_bpf
+ #define ARCH_ia64
+@@ -126,6 +127,11 @@
+ disassemble = print_insn_aarch64;
+ break;
+ #endif
++#ifdef ARCH_sw64
++ case bfd_arch_sw64:
++ disassemble = print_insn_sw64;
++ break;
++#endif
+ #ifdef ARCH_alpha
+ case bfd_arch_alpha:
+ disassemble = print_insn_alpha;
+@@ -553,6 +559,9 @@
+ #ifdef ARCH_aarch64
+ print_aarch64_disassembler_options (stream);
+ #endif
++#ifdef ARCH_sw64
++ print_sw64_disassembler_options (stream);
++#endif
+ #ifdef ARCH_arc
+ print_arc_disassembler_options (stream);
+ #endif
+@@ -607,6 +616,11 @@
+ info->created_styled_output = true;
+ break;
+ #endif
++#ifdef ARCH_sw64
++ case bfd_arch_sw64:
++ info->created_styled_output = true;
++ break;
++#endif
+ #ifdef ARCH_arc
+ case bfd_arch_arc:
+ info->created_styled_output = true;
+diff -Naur gdb-14.1-after-patch/opcodes/disassemble.h gdb-14.1-sw64/opcodes/disassemble.h
+--- gdb-14.1-after-patch/opcodes/disassemble.h 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/opcodes/disassemble.h 2025-03-03 10:59:13.840000000 +0800
+@@ -22,6 +22,7 @@
+ #include "dis-asm.h"
+
+ extern int print_insn_aarch64 (bfd_vma, disassemble_info *);
++extern int print_insn_sw64 (bfd_vma, disassemble_info *);
+ extern int print_insn_alpha (bfd_vma, disassemble_info *);
+ extern int print_insn_avr (bfd_vma, disassemble_info *);
+ extern int print_insn_bfin (bfd_vma, disassemble_info *);
+diff -Naur gdb-14.1-after-patch/opcodes/Makefile.in gdb-14.1-sw64/opcodes/Makefile.in
+--- gdb-14.1-after-patch/opcodes/Makefile.in 2023-12-03 13:23:54.000000000 +0800
++++ gdb-14.1-sw64/opcodes/Makefile.in 2025-03-03 10:59:13.820000000 +0800
+@@ -483,6 +483,8 @@
+ aarch64-dis-2.c \
+ aarch64-opc.c \
+ aarch64-opc-2.c \
++ sw64-dis.c \
++ sw64-opc.c \
+ alpha-dis.c \
+ alpha-opc.c \
+ bpf-dis.c \
+@@ -871,6 +873,8 @@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/aarch64-dis.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/aarch64-opc-2.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/aarch64-opc.Plo@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sw64-dis.Plo@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sw64-opc.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/alpha-dis.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/alpha-opc.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arc-dis.Plo@am__quote@
+diff -Naur gdb-14.1-after-patch/opcodes/sw64-dis.c gdb-14.1-sw64/opcodes/sw64-dis.c
+--- gdb-14.1-after-patch/opcodes/sw64-dis.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/opcodes/sw64-dis.c 2025-03-03 10:59:13.890000000 +0800
+@@ -0,0 +1,256 @@
++/* sw64-dis.c -- Disassemble SW64 AXP instructions
++ Copyright (C) 1996-2023 Free Software Foundation, Inc.
++ Contributed by Richard Henderson ,
++ patterned after the PPC opcode handling written by Ian Lance Taylor.
++
++ This file is part of libopcodes.
++
++ This library is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3, or (at your option)
++ any later version.
++
++ It is distributed in the hope that it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
++ License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this file; see the file COPYING. If not, write to the Free
++ Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
++ 02110-1301, USA. */
++
++#include "sysdep.h"
++#include "opintl.h"
++#include
++#include "disassemble.h"
++#include "opcode/sw64.h"
++
++/* OSF register names. */
++
++static const char * const osf_regnames[64] = {
++ "$r0", "$r1", "$r2", "$r3" , "$r4", "$r5", "$r6", "$r7",
++ "$r8", "$r9", "$r10", "$r11" , "$r12", "$r13", "$r14", "fp",
++ "$r16", "$r17", "$r18", "$r19" , "$r20", "$r21", "$r22", "$r23",
++ "$r24", "$r25", "ra", "$r27" , "$r28", "$r29", "sp", "$r31",
++ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
++ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
++ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
++ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31"
++};
++
++/* VMS register names. */
++
++static const char * const vms_regnames[64] = {
++ "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
++ "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
++ "R16", "R17", "R18", "R19", "R20", "R21", "R22", "R23",
++ "R24", "AI", "RA", "PV", "AT", "FP", "SP", "RZ",
++ "F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7",
++ "F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15",
++ "F16", "F17", "F18", "F19", "F20", "F21", "F22", "F23",
++ "F24", "F25", "F26", "F27", "F28", "F29", "F30", "FZ"
++};
++
++/* Disassemble SW64 instructions. */
++
++int
++print_insn_sw64 (bfd_vma memaddr, struct disassemble_info *info)
++{
++ static const struct sw64_opcode *opcode_index[AXP_NOPS+1];
++ const char * const * regnames;
++ const struct sw64_opcode *opcode, *opcode_end;
++ const unsigned char *opindex;
++ unsigned insn, op, isa_mask;
++ int need_comma;
++
++ /* Initialize the majorop table the first time through */
++ if (!opcode_index[0])
++ {
++ opcode = sw64_opcodes;
++ opcode_end = opcode + sw64_num_opcodes;
++
++ for (op = 0; op < AXP_NOPS; ++op)
++ {
++ opcode_index[op] = opcode;
++ if ((AXP_LITOP (opcode->opcode) != 0x10)
++ && (AXP_LITOP (opcode->opcode) != 0x11))
++ {
++ while (opcode < opcode_end && op == AXP_OP (opcode->opcode))
++ ++opcode;
++ }
++ else
++ {
++ while (opcode < opcode_end && op == AXP_LITOP (opcode->opcode))
++ ++opcode;
++ }
++ }
++ opcode_index[op] = opcode;
++ }
++
++ if (info->flavour == bfd_target_evax_flavour)
++ regnames = vms_regnames;
++ else
++ regnames = osf_regnames;
++
++ isa_mask = AXP_OPCODE_NOPAL;
++ switch (info->mach)
++ {
++ case bfd_mach_sw64:
++ isa_mask |= AXP_OPCODE_BASE | AXP_OPCODE_SW6A;
++ break;
++ case bfd_mach_sw64_sw6b:
++ isa_mask |= AXP_OPCODE_BASE | AXP_OPCODE_SW6B;
++ break;
++ case bfd_mach_sw64_sw8a:
++ isa_mask |= AXP_OPCODE_BASE | AXP_OPCODE_SW8A;
++ break;
++ }
++
++ /* Read the insn into a host word */
++ {
++ bfd_byte buffer[4];
++ int status = (*info->read_memory_func) (memaddr, buffer, 4, info);
++ if (status != 0)
++ {
++ (*info->memory_error_func) (status, memaddr, info);
++ return -1;
++ }
++ insn = bfd_getl32 (buffer);
++ }
++
++ /* Get the major opcode of the instruction. */
++
++ if((AXP_LITOP (insn)==0x10) || (AXP_LITOP (insn)==0x11))
++ op = AXP_LITOP (insn);
++ else if((AXP_OP(insn) & 0x3C) == 0x14 ) //logx
++ op=0x14;
++ else
++ op = AXP_OP (insn);
++
++ /* Find the first match in the opcode table. */
++ opcode_end = opcode_index[op + 1];
++ for (opcode = opcode_index[op]; opcode < opcode_end; ++opcode)
++ {
++ if ((insn ^ opcode->opcode) & opcode->mask)
++ continue;
++
++ if (!(opcode->flags & isa_mask))
++ continue;
++
++ /* Make two passes over the operands. First see if any of them
++ have extraction functions, and, if they do, make sure the
++ instruction is valid. */
++ {
++ int invalid = 0;
++ for (opindex = opcode->operands; *opindex != 0; opindex++)
++ {
++ const struct sw64_operand *operand = sw64_operands + *opindex;
++ if (operand->extract)
++ (*operand->extract) (insn, &invalid);
++ }
++ if (invalid)
++ continue;
++ }
++
++ /* The instruction is valid. */
++ goto found;
++ }
++
++ /* No instruction found */
++ (*info->fprintf_func) (info->stream, ".long %#08x", insn);
++
++ return 4;
++
++found:
++ if (!strncmp ("sys_call", opcode->name,8))
++ {
++ if (insn & (0x1 << 25))
++ (*info->fprintf_func) (info->stream, "%s", "sys_call");
++ else
++ (*info->fprintf_func) (info->stream, "%s", "sys_call/b");
++ }
++ else
++ (*info->fprintf_func) (info->stream, "%s", opcode->name);
++
++/* get zz[7:6] and zz[5:0] to form truth for vlog */
++ if (!strcmp(opcode->name,"vlog"))
++ {
++ unsigned int truth;
++ char tr[4];
++ truth = (AXP_OP (insn) & 3) << 6;
++ truth = truth | ((insn & 0xFC00) >> 10);
++ sprintf (tr,"%x",truth);
++ (*info->fprintf_func) (info->stream, "%s", tr);
++ }
++ if (opcode->operands[0] != 0)
++ (*info->fprintf_func) (info->stream, "\t");
++
++ /* Now extract and print the operands. */
++ need_comma = 0;
++ for (opindex = opcode->operands; *opindex != 0; opindex++)
++ {
++ const struct sw64_operand *operand = sw64_operands + *opindex;
++ int value;
++
++ /* Operands that are marked FAKE are simply ignored. We
++ already made sure that the extract function considered
++ the instruction to be valid. */
++ if ((operand->flags & AXP_OPERAND_FAKE) != 0)
++ continue;
++
++ /* Extract the value from the instruction. */
++ if (operand->extract)
++ value = (*operand->extract) (insn, (int *) NULL);
++ else
++ {
++ value = (insn >> operand->shift) & ((1 << operand->bits) - 1);
++ if (operand->flags & AXP_OPERAND_SIGNED)
++ {
++ int signbit = 1 << (operand->bits - 1);
++ value = (value ^ signbit) - signbit;
++ }
++ }
++
++ if (need_comma &&
++ ((operand->flags & (AXP_OPERAND_PARENS | AXP_OPERAND_COMMA))
++ != AXP_OPERAND_PARENS))
++ {
++ (*info->fprintf_func) (info->stream, ",");
++ }
++ if (operand->flags & AXP_OPERAND_PARENS)
++ (*info->fprintf_func) (info->stream, "(");
++
++ /* Print the operand as directed by the flags. */
++ if (operand->flags & AXP_OPERAND_IR)
++ (*info->fprintf_func) (info->stream, "%s", regnames[value]);
++ else if (operand->flags & AXP_OPERAND_FPR)
++ (*info->fprintf_func) (info->stream, "%s", regnames[value + 32]);
++ else if (operand->flags & AXP_OPERAND_RELATIVE)
++ (*info->print_address_func) (memaddr + 4 + value, info);
++ else if (operand->flags & AXP_OPERAND_SIGNED)
++ (*info->fprintf_func) (info->stream, "%d", value);
++ else
++ (*info->fprintf_func) (info->stream, "%#x", value);
++
++ if (operand->flags & AXP_OPERAND_PARENS)
++ (*info->fprintf_func) (info->stream, ")");
++ need_comma = 1;
++ }
++
++ return 4;
++}
++
++void
++print_sw64_disassembler_options (FILE *stream)
++{
++ fprintf (stream, _("\n\
++The following SW64 disassembler options are supported for use\n\
++with the -M switch (multiple options should be separated by commas):\n"));
++
++ fprintf (stream, _("\n\
++ no-aliases Use canonical instruction forms.\n"));
++ fprintf (stream, _("\n\
++ numeric Print numeric register names, rather than ABI names.\n"));
++ fprintf (stream, _("\n"));
++}
+diff -Naur gdb-14.1-after-patch/opcodes/sw64-opc.c gdb-14.1-sw64/opcodes/sw64-opc.c
+--- gdb-14.1-after-patch/opcodes/sw64-opc.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/opcodes/sw64-opc.c 2025-03-03 10:59:13.890000000 +0800
+@@ -0,0 +1,1057 @@
++/* sw64-opc.c -- SW64 AXP opcode list
++ Copyright (C) 1996-2023 Free Software Foundation, Inc.
++ Contributed by Richard Henderson ,
++ patterned after the PPC opcode handling written by Ian Lance Taylor.
++
++ This file is part of libopcodes.
++
++ This library is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3, or (at your option)
++ any later version.
++
++ It is distributed in the hope that it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
++ License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this file; see the file COPYING. If not, write to the
++ Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
++ 02110-1301, USA. */
++
++#include "sysdep.h"
++#include
++#include "opcode/sw64.h"
++#include "bfd.h"
++#include "opintl.h"
++
++/* This file holds the SW64 AXP opcode table. The opcode table includes
++ almost all of the extended instruction mnemonics. This permits the
++ disassembler to use them, and simplifies the assembler logic, at the
++ cost of increasing the table size. The table is strictly constant
++ data, so the compiler should be able to put it in the text segment.
++
++ This file also holds the operand table. All knowledge about inserting
++ and extracting operands from instructions is kept in this file.
++
++ The information for the base instruction set was compiled from the
++ _SW64 Architecture Handbook_, Digital Order Number EC-QD2KB-TE,
++ version 2.
++ */
++
++/* The RB field when it is the same as the RA field in the same insn.
++ This operand is marked fake. The insertion function just copies
++ the RA field into the RB field, and the extraction function just
++ checks that the fields are the same. */
++
++static unsigned
++insert_rba (unsigned insn,
++ int value ATTRIBUTE_UNUSED,
++ const char **errmsg ATTRIBUTE_UNUSED)
++{
++ return insn | (((insn >> 21) & 0x1f) << 16);
++}
++
++static int
++extract_rba (unsigned insn, int *invalid)
++{
++ if (invalid != (int *) NULL
++ && ((insn >> 21) & 0x1f) != ((insn >> 16) & 0x1f))
++ *invalid = 1;
++ return 0;
++}
++
++/* The same for the RC field. */
++
++static unsigned
++insert_rca (unsigned insn,
++ int value ATTRIBUTE_UNUSED,
++ const char **errmsg ATTRIBUTE_UNUSED)
++{
++ return insn | ((insn >> 21) & 0x1f);
++}
++
++static int
++extract_rca (unsigned insn, int *invalid)
++{
++ if (invalid != (int *) NULL
++ && ((insn >> 21) & 0x1f) != (insn & 0x1f))
++ *invalid = 1;
++ return 0;
++}
++
++static unsigned
++insert_rdc (unsigned insn,
++ int value ATTRIBUTE_UNUSED,
++ const char **errmsg ATTRIBUTE_UNUSED)
++{
++ return insn | ((insn >> 5) & 0x1f);
++}
++
++static int
++extract_rdc (unsigned insn, int *invalid)
++{
++ if (invalid != (int *) NULL
++ && ((insn >> 5) & 0x1f) != (insn & 0x1f))
++ *invalid = 1;
++ return 0;
++}
++
++/* Fake arguments in which the registers must be set to ZERO. */
++
++static unsigned
++insert_za (unsigned insn,
++ int value ATTRIBUTE_UNUSED,
++ const char **errmsg ATTRIBUTE_UNUSED)
++{
++ return insn | (31 << 21);
++}
++
++static int
++extract_za (unsigned insn, int *invalid)
++{
++ if (invalid != (int *) NULL && ((insn >> 21) & 0x1f) != 31)
++ *invalid = 1;
++ return 0;
++}
++
++static unsigned
++insert_zb (unsigned insn,
++ int value ATTRIBUTE_UNUSED,
++ const char **errmsg ATTRIBUTE_UNUSED)
++{
++ return insn | (31 << 16);
++}
++
++static int
++extract_zb (unsigned insn, int *invalid)
++{
++ if (invalid != (int *) NULL && ((insn >> 16) & 0x1f) != 31)
++ *invalid = 1;
++ return 0;
++}
++
++static unsigned
++insert_zc (unsigned insn,
++ int value ATTRIBUTE_UNUSED,
++ const char **errmsg ATTRIBUTE_UNUSED)
++{
++ return insn | 31;
++}
++
++static int
++extract_zc (unsigned insn, int *invalid)
++{
++ if (invalid != (int *) NULL && (insn & 0x1f) != 31)
++ *invalid = 1;
++ return 0;
++}
++
++
++/* The displacement field of a Branch format insn. */
++
++static unsigned
++insert_bdisp (unsigned insn, int value, const char **errmsg)
++{
++ if (errmsg != (const char **)NULL && (value & 3))
++ *errmsg = _("branch operand unaligned");
++ return insn | ((value / 4) & 0x1FFFFF);
++}
++
++static int
++extract_bdisp (unsigned insn, int *invalid ATTRIBUTE_UNUSED)
++{
++ return 4 * (((insn & 0x1FFFFF) ^ 0x100000) - 0x100000);
++}
++
++/* The hint field of a JMP/JSR insn. */
++
++/* sw use 16 bits hint disp. */
++static unsigned
++insert_jhint (unsigned insn, int value, const char **errmsg)
++{
++ if (errmsg != (const char **)NULL && (value & 3))
++ *errmsg = _("jump hint unaligned");
++ return insn | ((value / 4) & 0xFFFF);
++}
++
++static int
++extract_jhint (unsigned insn, int *invalid ATTRIBUTE_UNUSED)
++{
++ return 4 * (((insn & 0xFFFF) ^ 0x8000) - 0x8000);
++}
++
++/* The hint field of an SW6 HW_JMP/JSR insn. */
++
++static unsigned
++insert_sw6hwjhint (unsigned insn, int value, const char **errmsg)
++{
++ if (errmsg != (const char **)NULL && (value & 3))
++ *errmsg = _("jump hint unaligned");
++ return insn | ((value / 4) & 0x1FFF);
++}
++
++static int
++extract_sw6hwjhint (unsigned insn, int *invalid ATTRIBUTE_UNUSED)
++{
++ return 4 * (((insn & 0x1FFF) ^ 0x1000) - 0x1000);
++}
++
++/* The operands table. */
++
++const struct sw64_operand sw64_operands[] =
++{
++ /* The fields are bits, shift, insert, extract, flags */
++ /* The zero index is used to indicate end-of-list */
++#define UNUSED 0
++ { 0, 0, 0, 0, 0, 0 },
++
++ /* The plain integer register fields. */
++#define RA (UNUSED + 1)
++ { 5, 21, 0, AXP_OPERAND_IR, 0, 0 },
++#define RB (RA + 1)
++ { 5, 16, 0, AXP_OPERAND_IR, 0, 0 },
++#define RC (RB + 1)
++ { 5, 0, 0, AXP_OPERAND_IR, 0, 0 },
++
++ /* The plain fp register fields. */
++#define FA (RC + 1)
++ { 5, 21, 0, AXP_OPERAND_FPR, 0, 0 },
++#define FB (FA + 1)
++ { 5, 16, 0, AXP_OPERAND_FPR, 0, 0 },
++#define FC (FB + 1)
++ { 5, 0, 0, AXP_OPERAND_FPR, 0, 0 },
++
++ /* The integer registers when they are ZERO. */
++#define ZA (FC + 1)
++ { 5, 21, 0, AXP_OPERAND_FAKE, insert_za, extract_za },
++#define ZB (ZA + 1)
++ { 5, 16, 0, AXP_OPERAND_FAKE, insert_zb, extract_zb },
++#define ZC (ZB + 1)
++ { 5, 0, 0, AXP_OPERAND_FAKE, insert_zc, extract_zc },
++
++ /* The RB field when it needs parentheses. */
++#define PRB (ZC + 1)
++ { 5, 16, 0, AXP_OPERAND_IR|AXP_OPERAND_PARENS, 0, 0 },
++
++ /* The RB field when it needs parentheses _and_ a preceding comma. */
++#define CPRB (PRB + 1)
++ { 5, 16, 0,
++ AXP_OPERAND_IR|AXP_OPERAND_PARENS|AXP_OPERAND_COMMA, 0, 0 },
++
++ /* The RB field when it must be the same as the RA field. */
++#define RBA (CPRB + 1)
++ { 5, 16, 0, AXP_OPERAND_FAKE, insert_rba, extract_rba },
++
++ /* The RC field when it must be the same as the RB field. */
++#define RCA (RBA + 1)
++ { 5, 0, 0, AXP_OPERAND_FAKE, insert_rca, extract_rca },
++
++#define RDC (RCA + 1)
++ { 5, 0, 0, AXP_OPERAND_FAKE, insert_rdc, extract_rdc },
++
++ /* The RC field when it can *default* to RA. */
++#define DRC1 (RDC + 1)
++ { 5, 0, 0,
++ AXP_OPERAND_IR|AXP_OPERAND_DEFAULT_FIRST, 0, 0 },
++
++ /* The RC field when it can *default* to RB. */
++#define DRC2 (DRC1 + 1)
++ { 5, 0, 0,
++ AXP_OPERAND_IR|AXP_OPERAND_DEFAULT_SECOND, 0, 0 },
++
++ /* The RD field when it can *default* to RC. */
++#define DRC3 (DRC2 + 1)
++ { 5, 0, 0,
++ AXP_OPERAND_IR|AXP_OPERAND_DEFAULT_THIRD, 0, 0 },
++
++ /* The FC field when it can *default* to RA. */
++#define DFC1 (DRC3 + 1)
++ { 5, 0, 0,
++ AXP_OPERAND_FPR|AXP_OPERAND_DEFAULT_FIRST, 0, 0 },
++
++ /* The FC field when it can *default* to RB. */
++#define DFC2 (DFC1 + 1)
++ { 5, 0, 0,
++ AXP_OPERAND_FPR|AXP_OPERAND_DEFAULT_SECOND, 0, 0 },
++
++ /* The FD field when it can *default* to FC. */
++#define DFC3 (DFC2 + 1)
++ { 5, 0, 0,
++ AXP_OPERAND_FPR|AXP_OPERAND_DEFAULT_THIRD, 0, 0 },
++
++ /* The unsigned 8-bit literal of Operate format insns. */
++#define LIT (DFC3 + 1)
++ { 8, 13, -LIT, AXP_OPERAND_UNSIGNED, 0, 0 },
++
++ /* The signed 16-bit displacement of Memory format insns. From here
++ we can't tell what relocation should be used, so don't use a default. */
++#define MDISP (LIT + 1)
++ { 16, 0, -MDISP, AXP_OPERAND_SIGNED, 0, 0 },
++
++ /* The signed "23-bit" aligned displacement of Branch format insns. */
++#define BDISP (MDISP + 1)
++ { 21, 0, BFD_RELOC_23_PCREL_S2,
++ AXP_OPERAND_RELATIVE, insert_bdisp, extract_bdisp },
++
++ /* The 25-bit PALcode function. */
++#define PALFN (BDISP + 1)
++ { 25, 0, -PALFN, AXP_OPERAND_UNSIGNED, 0, 0 },
++
++ /* sw jsr/ret instructions have no function bits. */
++ /* The optional signed "16-bit" aligned displacement of the JMP/JSR hint. */
++#define JMPHINT (PALFN + 1)
++ { 16, 0, BFD_RELOC_SW64_HINT,
++ AXP_OPERAND_RELATIVE|AXP_OPERAND_DEFAULT_ZERO|AXP_OPERAND_NOOVERFLOW,
++ insert_jhint, extract_jhint },
++
++ /* The optional hint to RET/JSR_COROUTINE. */
++#define RETHINT (JMPHINT + 1)
++ { 16, 0, -RETHINT,
++ AXP_OPERAND_UNSIGNED|AXP_OPERAND_DEFAULT_ZERO, 0, 0 },
++
++#define SW6HWDISP (RETHINT + 1)
++ {12, 0, -SW6HWDISP, AXP_OPERAND_SIGNED, 0, 0 },
++
++ /* The 16-bit combined index/scoreboard mask for the sw6
++ hw_m[ft]pr (pal19/pal1d) insns. */
++#define SW6HWINDEX (SW6HWDISP + 1)
++ { 16, 0, -SW6HWINDEX, AXP_OPERAND_UNSIGNED, 0, 0 },
++
++ /* The 13-bit branch hint for the sw6 hw_jmp/jsr (pal1e) insn. */
++#define SW6HWJMPHINT (SW6HWINDEX+ 1)
++ { 8, 0, -SW6HWJMPHINT,
++ AXP_OPERAND_RELATIVE|AXP_OPERAND_DEFAULT_ZERO|AXP_OPERAND_NOOVERFLOW,
++ insert_sw6hwjhint, extract_sw6hwjhint },
++
++ /* for the third operand of ternary operands integer insn. */
++#define R3 (SW6HWJMPHINT + 1)
++ { 5, 5, 0, AXP_OPERAND_IR, 0, 0 },
++
++ /* The plain fp register fields */
++#define F3 (R3 + 1)
++ { 5, 5, 0, AXP_OPERAND_FPR, 0, 0 },
++
++/* sw simd settle instruction lit */
++#define FMALIT (F3 + 1)
++ { 5, 5, -FMALIT, AXP_OPERAND_UNSIGNED, 0, 0 },//V1.1
++
++/* For PAL, to check that disp is non-negative and less than 0x8000. WCH20080901 */
++#define LMDISP (FMALIT + 1)
++ { 15, 0, -LMDISP, AXP_OPERAND_UNSIGNED, 0, 0 },
++
++#define RPIINDEX (LMDISP + 1)
++ { 8, 0, -RPIINDEX, AXP_OPERAND_UNSIGNED, 0, 0 },
++
++#define ATMDISP (RPIINDEX + 1)
++ { 12, 0, -ATMDISP, AXP_OPERAND_SIGNED, 0, 0 },
++};
++
++const unsigned sw64_num_operands = sizeof(sw64_operands)/sizeof(*sw64_operands);
++
++
++/* Macros used to form opcodes. */
++
++/* The main opcode. */
++#define OP(x) (((x) & 0x3Fu) << 26)
++#define OP_MASK 0xFC000000
++
++/* Branch format instructions. */
++#define BRA_(oo) OP(oo)
++#define BRA_MASK OP_MASK
++#define BRA(oo) BRA_(oo), BRA_MASK
++
++/* Floating point format instructions. */
++#define FP_(oo,fff) (OP(oo) | (((fff) & 0xFF) << 5))
++#define FP_MASK (OP_MASK | 0x1FE0)
++#define FP(oo,fff) FP_(oo,fff), FP_MASK
++
++#define FMA_(oo,fff) (OP(oo) | (((fff) & 0x3F) << 10 ))
++#define FMA_MASK (OP_MASK | 0xFC00)
++#define FMA(oo,fff) FMA_(oo,fff), FMA_MASK
++
++/* Memory format instructions. */
++#define MEM_(oo) OP(oo)
++#define MEM_MASK OP_MASK
++#define MEM(oo) MEM_(oo), MEM_MASK
++
++/* Memory/Func Code format instructions. */
++#define MFC_(oo,ffff) (OP(oo) | ((ffff) & 0xFFFF))
++#define MFC_MASK (OP_MASK | 0xFFFF)
++#define MFC(oo,ffff) MFC_(oo,ffff), MFC_MASK
++
++/* Memory/Branch format instructions. */
++#define MBR_(oo,h) (OP(oo) | (((h) & 3) << 14))
++#define MBR_MASK (OP_MASK | 0xC000)
++#define MBR(oo,h) MBR_(oo,h), MBR_MASK
++
++/* Operate format instructions. The OPRL variant specifies a
++ literal second argument. */
++#define OPR_(oo,ff) (OP(oo) | (((ff) & 0xFF) << 5))
++#define OPRL_(oo,ff) (OPR_((oo),(ff)) )
++#define OPR_MASK (OP_MASK | 0x1FE0)
++#define OPR(oo,ff) OPR_(oo,ff), OPR_MASK
++#define OPRL(oo,ff) OPRL_(oo,ff), OPR_MASK
++
++/* sw ternary operands Operate format instructions. */
++#define TOPR_(oo,ff) (OP(oo) | (((ff) & 0x07) << 10))
++#define TOPRL_(oo,ff) (TOPR_((oo),(ff)))
++#define TOPR_MASK (OP_MASK | 0x1C00)
++#define TOPR(oo,ff) TOPR_(oo,ff), TOPR_MASK
++#define TOPRL(oo,ff) TOPRL_(oo,ff), TOPR_MASK
++
++/* sw atom instructions. */
++#define ATMEM_(oo,h) (OP(oo) | (((h) & 0xF) << 12))
++#define ATMEM_MASK (OP_MASK | 0xF000)
++#define ATMEM(oo,h) ATMEM_(oo,h), ATMEM_MASK
++
++/* sw privilege instructions. */
++#define PRIRET_(oo,h) (OP(oo) | (((h) & 0x1) << 20))
++#define PRIRET_MASK (OP_MASK | 0x100000)
++#define PRIRET(oo,h) PRIRET_(oo,h), PRIRET_MASK
++
++/* sw rpi_rcsr,rpi_wcsr. */
++#define CSR_(oo,ff) (OP(oo) | (((ff) & 0xFF) << 8))
++#define CSR_MASK (OP_MASK | 0xFF00)
++#define CSR(oo,ff) CSR_(oo,ff), CSR_MASK
++
++/* Generic PALcode format instructions. */
++#define PCD_(oo,ff) (OP(oo) | (ff << 25))
++#define PCD_MASK OP_MASK
++#define PCD(oo,ff) PCD_(oo,ff), PCD_MASK
++
++/* Specific PALcode instructions. */
++#define SPCD_(oo,ffff) (OP(oo) | ((ffff) & 0x3FFFFFF))
++#define SPCD_MASK 0xFFFFFFFF
++#define SPCD(oo,ffff) SPCD_(oo,ffff), SPCD_MASK
++
++/* Hardware memory (hw_{ld,st}) instructions. */
++#define SW6HWMEM_(oo,f) (OP(oo) | (((f) & 0xF) << 12))
++#define SW6HWMEM_MASK (OP_MASK | 0xF000)
++#define SW6HWMEM(oo,f) SW6HWMEM_(oo,f), SW6HWMEM_MASK
++
++#define SW6HWMBR_(oo,h) (OP(oo) | (((h) & 7) << 13))
++#define SW6HWMBR_MASK (OP_MASK | 0xE000)
++#define SW6HWMBR(oo,h) SW6HWMBR_(oo,h), SW6HWMBR_MASK
++
++#define LOGX_(oo,ff) (OP(oo) | (((ff) & 0x3F) << 10))
++#define LOGX_MASK (0xF0000000)
++#define LOGX(oo,ff) LOGX_(oo,ff), LOGX_MASK
++
++#define PSE_LOGX_(oo,ff) (OP(oo) | (((ff) & 0x3F) << 10) | (((ff) >> 0x6) << 26 ) | 0x3E0 )
++#define PSE_LOGX(oo,ff) PSE_LOGX_(oo,ff), LOGX_MASK
++
++/* Abbreviations for instruction subsets. */
++//#define SW6 AXP_OPCODE_SW6
++#define BASE AXP_OPCODE_BASE
++#define SW6A AXP_OPCODE_SW6A
++#define SW6B AXP_OPCODE_SW6B
++#define SW8A AXP_OPCODE_SW8A
++
++/* Common combinations of arguments. */
++#define ARG_NONE { 0 }
++#define ARG_BRA { RA, BDISP }
++#define ARG_FBRA { FA, BDISP }
++#define ARG_FP { FA, FB, DFC1 }
++#define ARG_FPZ1 { ZA, FB, DFC1 }
++#define ARG_MEM { RA, MDISP, PRB }
++#define ARG_FMEM { FA, MDISP, PRB }
++#define ARG_OPR { RA, RB, DRC1 }
++#define ARG_OPRL { RA, LIT, DRC1 }
++#define ARG_OPRZ1 { ZA, RB, DRC1 }
++#define ARG_OPRLZ1 { ZA, LIT, RC }
++#define ARG_PCD { PALFN }
++#define ARG_SW6HWMEM { RA, SW6HWDISP, PRB }
++
++#define ARG_FPL { FA,LIT, DFC1 }
++#define ARG_FMA { FA,FB,F3, DFC1 }
++#define ARG_PREFETCH { ZA, MDISP, PRB }
++#define ARG_FCMOV { FA,FB,F3, DFC3 }
++#define ARG_TOPR { RA, RB,R3, DRC3 }
++#define ARG_TOPRL { RA, LIT, R3,DRC3 }
++
++/* for cmov** instruction. */
++#define ARG_TOPC { RA, RB, R3, RDC }
++#define ARG_TOPCL { RA, LIT, R3, RDC }
++#define ARG_TOPFC { FA, FB, F3, RDC }
++#define ARG_TOPFCL { FA, LIT, F3, RDC }
++
++/* sw settle instruction. */
++#define ARG_FMAL { FA,FB,FMALIT, DFC1 }
++/* sw atom instruction. */
++#define ARG_ATMEM { RA, ATMDISP, PRB }
++
++#define ARG_VUAMEM { FA, ATMDISP, PRB }
++#define ARG_OPRLZ3 { RA, LIT, ZC }
++
++/* The opcode table.
++
++ The format of the opcode table is:
++
++ NAME OPCODE MASK { OPERANDS }
++
++ NAME is the name of the instruction.
++
++ OPCODE is the instruction opcode.
++
++ MASK is the opcode mask; this is used to tell the disassembler
++ which bits in the actual opcode must match OPCODE.
++
++ OPERANDS is the list of operands.
++
++ The preceding macros merge the text of the OPCODE and MASK fields.
++
++ The disassembler reads the table in order and prints the first
++ instruction which matches, so this table is sorted to put more
++ specific instructions before more general instructions.
++
++ Otherwise, it is sorted by major opcode and minor function code.
++
++ There are three classes of not-really-instructions in this table:
++
++ ALIAS is another name for another instruction. Some of
++ these come from the Architecture Handbook, some
++ come from the original gas opcode tables. In all
++ cases, the functionality of the opcode is unchanged.
++
++ PSEUDO a stylized code form endorsed by Chapter A.4 of the
++ Architecture Handbook.
++
++ EXTRA a stylized code form found in the original gas tables.
++ */
++
++const struct sw64_opcode sw64_opcodes[] =
++{
++ { "sys_call/b", PCD(0x00,0x00), BASE, ARG_PCD },
++ { "sys_call", PCD(0x00,0x01), BASE, ARG_PCD },
++ { "draina", SPCD(0x00,0x0002), BASE, ARG_NONE },
++ { "bpt", SPCD(0x00,0x0080), BASE, ARG_NONE },
++ { "bugchk", SPCD(0x00,0x0081), BASE, ARG_NONE },
++ { "callsys", SPCD(0x00,0x0083), BASE, ARG_NONE },
++ { "chmk", SPCD(0x00,0x0083), BASE, ARG_NONE },
++ { "imb", SPCD(0x00,0x0086), BASE, ARG_NONE },
++ { "rduniq", SPCD(0x00,0x009e), BASE, ARG_NONE },
++ { "wruniq", SPCD(0x00,0x009f), BASE, ARG_NONE },
++ { "gentrap", SPCD(0x00,0x00aa), BASE, ARG_NONE },
++ { "call", MEM(0x01), BASE, { RA, CPRB, JMPHINT } },
++ { "ret", MEM(0x02), BASE, { RA, CPRB, RETHINT } },
++ { "ret", MEM_(0x02)| (31 << 21) | (26 << 16) | 1,0xFFFFFFFF, BASE, { 0 } }, /*pseudo*/
++ { "jmp", MEM(0x03), BASE, { RA, CPRB, JMPHINT } },
++ { "br", BRA(0x04), BASE, { ZA, BDISP } }, /* pseudo */
++ { "br", BRA(0x04), BASE, ARG_BRA },
++ { "bsr", BRA(0x05), BASE, ARG_BRA },
++ { "memb", MFC(0x06,0x0000), BASE, ARG_NONE },
++ { "imemb", MFC(0x06,0x0001), BASE, ARG_NONE },
++ { "rtc", MFC(0x06,0x0020), BASE, { RA, ZB } },
++ { "rtc", MFC(0x06,0x0020), BASE, { RA, RB } },
++ { "rcid", MFC(0x06,0x0040), BASE, { RA , ZB} },
++ { "halt", MFC(0x06,0x0080), BASE, { ZA, ZB } },
++ { "rd_f", MFC(0x06,0x1000), BASE, { RA, ZB } },
++ { "wr_f", MFC(0x06,0x1020), BASE, { RA, ZB } },
++ { "rtid", MFC(0x06,0x1040), BASE, { RA } },
++ { "pri_rcsr", CSR(0x06,0xFE), BASE, { RA, RPIINDEX ,ZB } },
++ { "pri_wcsr", CSR(0x06,0xFF), BASE, { RA, RPIINDEX ,ZB } },
++ { "pri_ret", PRIRET(0x07,0x0), BASE, { RA } },
++ { "pri_ret/b", PRIRET(0x07,0x1), BASE, { RA } },
++ { "lldw", ATMEM(0x08,0x0), BASE, ARG_ATMEM },
++ { "lldl", ATMEM(0x08,0x1), BASE, ARG_ATMEM },
++ { "ldw_inc", ATMEM(0x08,0x2), BASE, ARG_ATMEM },
++ { "ldl_inc", ATMEM(0x08,0x3), BASE, ARG_ATMEM },
++ { "ldw_dec", ATMEM(0x08,0x4), BASE, ARG_ATMEM },
++ { "ldl_dec", ATMEM(0x08,0x5), BASE, ARG_ATMEM },
++ { "ldw_set", ATMEM(0x08,0x6), BASE, ARG_ATMEM },
++ { "ldl_set", ATMEM(0x08,0x7), BASE, ARG_ATMEM },
++ { "lstw", ATMEM(0x08,0x8), BASE, ARG_ATMEM },
++ { "lstl", ATMEM(0x08,0x9), BASE, ARG_ATMEM },
++ { "ldw_nc", ATMEM(0x08,0xA), BASE, ARG_ATMEM },
++ { "ldl_nc", ATMEM(0x08,0xB), BASE, ARG_ATMEM },
++ { "ldd_nc", ATMEM(0x08,0xC), BASE, ARG_VUAMEM },
++ { "stw_nc", ATMEM(0x08,0xD), BASE, ARG_ATMEM },
++ { "stl_nc", ATMEM(0x08,0xE), BASE, ARG_ATMEM },
++ { "std_nc", ATMEM(0x08,0xF), BASE, ARG_VUAMEM },
++ { "fillcs", MEM(0x09), BASE, ARG_PREFETCH },
++ { "ldwe", MEM(0x09), BASE, ARG_FMEM }, //sw6 v0.2a
++ { "e_fillcs", MEM(0x0A), BASE, ARG_PREFETCH },
++ { "ldse", MEM(0x0A), BASE, ARG_FMEM },
++ { "lds4e", MEM(0x0A), BASE, ARG_FMEM },/* pseudo BASE SIMD WCH20081028*/
++ { "fillcs_e", MEM(0x0B), BASE, ARG_PREFETCH },
++ { "ldde", MEM(0x0B), BASE, ARG_FMEM },
++ { "ldd4e", MEM(0x0B), BASE, ARG_FMEM },/* pseudo BASE SIMD WCH20081028*/
++ { "e_fillde", MEM(0x0C), BASE, ARG_PREFETCH },
++ { "vlds", MEM(0x0C), BASE, ARG_FMEM },
++ { "v4lds", MEM(0x0C), BASE, ARG_FMEM },
++ { "vldd", MEM(0x0D), BASE, ARG_FMEM },
++ { "v4ldd", MEM(0x0D), BASE, ARG_FMEM },
++ { "vsts", MEM(0x0E), BASE, ARG_FMEM },
++ { "v4sts", MEM(0x0E), BASE, ARG_FMEM },
++ { "vstd", MEM(0x0F), BASE, ARG_FMEM },
++ { "v4std", MEM(0x0F), BASE, ARG_FMEM },
++ { "addw", OPR(0x10,0x00), BASE, ARG_OPR },
++ { "addw", OPRL(0x12,0x00), BASE, ARG_OPRL },
++ { "sextl", OPR(0x10,0x00), BASE, ARG_OPRZ1 }, /* pseudo */
++ { "sextl", OPRL(0x12,0x00), BASE, ARG_OPRLZ1 }, /* pseudo */
++ { "subw", OPR(0x10,0x01), BASE, ARG_OPR },
++ { "subw", OPRL(0x12,0x01), BASE, ARG_OPRL },
++ { "negw", OPR(0x10,0x01), BASE, ARG_OPRZ1 }, /* pseudo swgcc */
++ { "negw", OPRL(0x12,0x01), BASE, ARG_OPRLZ1 }, /* pseudo swgcc */
++ { "s4addw", OPR(0x10,0x02), BASE, ARG_OPR },
++ { "s4addw", OPRL(0x12,0x02), BASE, ARG_OPRL },
++ { "s4subw", OPR(0x10,0x03), BASE, ARG_OPR },
++ { "s4subw", OPRL(0x12,0x03), BASE, ARG_OPRL },
++ { "s8addw", OPR(0x10,0x04), BASE, ARG_OPR },
++ { "s8addw", OPRL(0x12,0x04), BASE, ARG_OPRL },
++ { "s8subw", OPR(0x10,0x05), BASE, ARG_OPR },
++ { "s8subw", OPRL(0x12,0x05), BASE, ARG_OPRL },
++ { "addl", OPR(0x10,0x08), BASE, ARG_OPR },
++ { "addl", OPRL(0x12,0x08), BASE, ARG_OPRL },
++ { "subl", OPR(0x10,0x09), BASE, ARG_OPR },
++ { "subl", OPRL(0x12,0x09), BASE, ARG_OPRL },
++ { "negl", OPR(0x10,0x09), BASE, ARG_OPRZ1 }, /* pseudo swgcc */
++ { "negl", OPRL(0x12,0x09), BASE, ARG_OPRLZ1 }, /* pseudo swgcc */
++ { "neglv", OPR(0x10,0x09), BASE, ARG_OPRZ1 }, /* pseudo swgcc */
++ { "neglv", OPRL(0x12,0x09), BASE, ARG_OPRLZ1 }, /* pseudo swgcc */
++ { "s4addl", OPR(0x10,0x0A), BASE, ARG_OPR },
++ { "s4addl", OPRL(0x12,0x0A), BASE, ARG_OPRL },
++ { "s4subl", OPR(0x10,0x0B), BASE, ARG_OPR },
++ { "s4subl", OPRL(0x12,0x0B), BASE, ARG_OPRL },
++ { "s8addl", OPR(0x10,0x0C), BASE, ARG_OPR },
++ { "s8addl", OPRL(0x12,0x0C), BASE, ARG_OPRL },
++ { "s8subl", OPR(0x10,0x0D), BASE, ARG_OPR },
++ { "s8subl", OPRL(0x12,0x0D), BASE, ARG_OPRL },
++ { "mulw", OPR(0x10,0x10), BASE, ARG_OPR },
++ { "mulw", OPRL(0x12,0x10), BASE, ARG_OPRL },
++ { "divw", OPR(0x10,0x11), BASE, ARG_OPR },
++ { "udivw", OPR(0x10,0x12), BASE, ARG_OPR },
++ { "remw", OPR(0x10,0x13), BASE, ARG_OPR },
++ { "uremw", OPR(0x10,0x14), BASE, ARG_OPR },
++ { "mull", OPR(0x10,0x18), BASE, ARG_OPR },
++ { "mull", OPRL(0x12,0x18), BASE, ARG_OPRL },
++ { "umulh", OPR(0x10,0x19), BASE, ARG_OPR },
++ { "umulh", OPRL(0x12,0x19), BASE, ARG_OPRL },
++ { "cmpeq", OPR(0x10,0x28), BASE, ARG_OPR },
++ { "cmpeq", OPRL(0x12,0x28), BASE, ARG_OPRL },
++ { "cmplt", OPR(0x10,0x29), BASE, ARG_OPR },
++ { "cmplt", OPRL(0x12,0x29), BASE, ARG_OPRL },
++ { "cmple", OPR(0x10,0x2A), BASE, ARG_OPR },
++ { "cmple", OPRL(0x12,0x2A), BASE, ARG_OPRL },
++ { "cmpult", OPR(0x10,0x2B), BASE, ARG_OPR },
++ { "cmpult", OPRL(0x12,0x2B), BASE, ARG_OPRL },
++ { "cmpule", OPR(0x10,0x2C), BASE, ARG_OPR },
++ { "cmpule", OPRL(0x12,0x2C), BASE, ARG_OPRL },
++ { "and", OPR(0x10,0x38), BASE, ARG_OPR },
++ { "and", OPRL(0x12,0x38),BASE, ARG_OPRL },
++ { "bic", OPR(0x10,0x39), BASE, ARG_OPR },
++ { "bic", OPRL(0x12,0x39),BASE, ARG_OPRL },
++ { "andnot", OPR(0x10,0x39), BASE, ARG_OPR },/* pseudo */
++ { "andnot", OPRL(0x12,0x39),BASE, ARG_OPRL },/* pseudo */
++ { "nop", OPR(0x10,0x3A), BASE, { ZA, ZB, ZC } }, /* now unop has a new expression */
++ { "excb", OPR(0x10,0x3A), BASE, { ZA, ZB, ZC } }, /* pseudo */
++ { "clr", OPR(0x10,0x3A),BASE, { ZA, ZB, RC } }, /* pseudo swgcc */
++ { "mov", OPR(0x10,0x3A), BASE, { ZA, RB, RC } }, /* pseudo */
++ { "mov", OPRL(0x12,0x3A),BASE, { ZA, LIT, RC } }, /* pseudo */
++ { "implver", OPRL_(0x12,0x3A)|2<<13,0xFFFFFFE0,BASE, {ZA,RC } }, /* pseudo swgcc */
++ { "amask", OPR_(0x10,0x3A)|31<<16,OPR_MASK, BASE, { ZA, RB, RC } }, /* pseudo */
++ { "amask", OPRL(0x12,0x3A), BASE, { ZA, LIT, RC } }, /* pseudo */
++ { "or", OPR(0x10,0x3A), BASE, ARG_OPR },
++ { "or", OPRL(0x12,0x3A),BASE, ARG_OPRL },
++ { "bis", OPR(0x10,0x3A), BASE, ARG_OPR },
++ { "bis", OPRL(0x12,0x3A),BASE, ARG_OPRL },
++ { "not", OPR(0x10,0x3B), BASE, ARG_OPRZ1 }, /* pseudo swgcc */
++ { "not", OPRL(0x12,0x3B),BASE, ARG_OPRLZ1 }, /* pseudo swgcc */
++ { "ornot", OPR(0x10,0x3B), BASE, ARG_OPR },
++ { "ornot", OPRL(0x12,0x3B),BASE, ARG_OPRL },
++ { "xor", OPR(0x10,0x3C), BASE, ARG_OPR },
++ { "xor", OPRL(0x12,0x3C),BASE, ARG_OPRL },
++ { "eqv", OPR(0x10,0x3D), BASE, ARG_OPR },
++ { "eqv", OPRL(0x12,0x3D),BASE, ARG_OPRL },
++ { "xornot", OPR(0x10,0x3D), BASE, ARG_OPR }, /* pseudo swgcc */
++ { "xornot", OPRL(0x12,0x3D),BASE, ARG_OPRL },/* pseudo swgcc */
++ { "inslb", OPR(0x10,0x40), BASE, ARG_OPR },
++ { "inslb", OPRL(0x12,0x40),BASE, ARG_OPRL },
++ { "ins0b", OPR(0x10,0x40), BASE, ARG_OPR },
++ { "ins0b", OPRL(0x12,0x40),BASE, ARG_OPRL },
++ { "inslh", OPR(0x10,0x41), BASE, ARG_OPR },
++ { "inslh", OPRL(0x12,0x41),BASE, ARG_OPRL },
++ { "ins1b", OPR(0x10,0x41), BASE, ARG_OPR },
++ { "ins1b", OPRL(0x12,0x41),BASE, ARG_OPRL },
++ { "inslw", OPR(0x10,0x42), BASE, ARG_OPR },
++ { "inslw", OPRL(0x12,0x42),BASE, ARG_OPRL },
++ { "ins2b", OPR(0x10,0x42), BASE, ARG_OPR },
++ { "ins2b", OPRL(0x12,0x42),BASE, ARG_OPRL },
++ { "insll", OPR(0x10,0x43), BASE, ARG_OPR },
++ { "insll", OPRL(0x12,0x43),BASE, ARG_OPRL },
++ { "ins3b", OPR(0x10,0x43), BASE, ARG_OPR },
++ { "ins3b", OPRL(0x12,0x43),BASE, ARG_OPRL },
++ { "inshb", OPR(0x10,0x44), BASE, ARG_OPR },
++ { "inshb", OPRL(0x12,0x44),BASE, ARG_OPRL },
++ { "ins4b", OPR(0x10,0x44), BASE, ARG_OPR },
++ { "ins4b", OPRL(0x12,0x44),BASE, ARG_OPRL },
++ { "inshh", OPR(0x10,0x45), BASE, ARG_OPR },
++ { "inshh", OPRL(0x12,0x45),BASE, ARG_OPRL },
++ { "ins5b", OPR(0x10,0x45), BASE, ARG_OPR },
++ { "ins5b", OPRL(0x12,0x45),BASE, ARG_OPRL },
++ { "inshw", OPR(0x10,0x46), BASE, ARG_OPR },
++ { "inshw", OPRL(0x12,0x46),BASE, ARG_OPRL },
++ { "ins6b", OPR(0x10,0x46), BASE, ARG_OPR },
++ { "ins6b", OPRL(0x12,0x46),BASE, ARG_OPRL },
++ { "inshl", OPR(0x10,0x47), BASE, ARG_OPR },
++ { "inshl", OPRL(0x12,0x47),BASE, ARG_OPRL },
++ { "ins7b", OPR(0x10,0x47), BASE, ARG_OPR },
++ { "ins7b", OPRL(0x12,0x47),BASE, ARG_OPRL },
++ { "sll", OPR(0x10,0x48), BASE, ARG_OPR },
++ { "sll", OPRL(0x12,0x48),BASE, ARG_OPRL },
++ { "srl", OPR(0x10,0x49), BASE, ARG_OPR },
++ { "srl", OPRL(0x12,0x49),BASE, ARG_OPRL },
++ { "sra", OPR(0x10,0x4A), BASE, ARG_OPR },
++ { "sra", OPRL(0x12,0x4A),BASE, ARG_OPRL },
++ { "sllw2", OPR(0x10,0x4C), BASE, ARG_OPR }, //sw6 v0.2a
++ { "sllw2", OPRL(0x12,0x4C),BASE, ARG_OPRL },//sw6 v0.2a
++ { "srlw2", OPR(0x10,0x4D), BASE, ARG_OPR }, //sw6 v0.2a
++ { "srlw2", OPRL(0x12,0x4D),BASE, ARG_OPRL },//sw6 v0.2a
++ { "sraw2", OPR(0x10,0x4E), BASE, ARG_OPR }, //sw6 v0.2a
++ { "sraw2", OPRL(0x12,0x4E),BASE, ARG_OPRL },//sw6 v0.2a
++ { "extlb", OPR(0x10,0x50), BASE, ARG_OPR },
++ { "extlb", OPRL(0x12,0x50),BASE, ARG_OPRL },
++ { "ext0b", OPR(0x10,0x50), BASE, ARG_OPR },
++ { "ext0b", OPRL(0x12,0x50),BASE, ARG_OPRL },
++ { "extlh", OPR(0x10,0x51), BASE, ARG_OPR },
++ { "extlh", OPRL(0x12,0x51),BASE, ARG_OPRL },
++ { "ext1b", OPR(0x10,0x51), BASE, ARG_OPR },
++ { "ext1b", OPRL(0x12,0x51),BASE, ARG_OPRL },
++ { "extlw", OPR(0x10,0x52), BASE, ARG_OPR },
++ { "extlw", OPRL(0x12,0x52),BASE, ARG_OPRL },
++ { "ext2b", OPR(0x10,0x52), BASE, ARG_OPR },
++ { "ext2b", OPRL(0x12,0x52),BASE, ARG_OPRL },
++ { "extll", OPR(0x10,0x53), BASE, ARG_OPR },
++ { "extll", OPRL(0x12,0x53),BASE, ARG_OPRL },
++ { "ext3b", OPR(0x10,0x53), BASE, ARG_OPR },
++ { "ext3b", OPRL(0x12,0x53),BASE, ARG_OPRL },
++ { "exthb", OPR(0x10,0x54), BASE, ARG_OPR },
++ { "exthb", OPRL(0x12,0x54),BASE, ARG_OPRL },
++ { "ext4b", OPR(0x10,0x54), BASE, ARG_OPR },
++ { "ext4b", OPRL(0x12,0x54),BASE, ARG_OPRL },
++ { "exthh", OPR(0x10,0x55), BASE, ARG_OPR },
++ { "exthh", OPRL(0x12,0x55),BASE, ARG_OPRL },
++ { "ext5b", OPR(0x10,0x55), BASE, ARG_OPR },
++ { "ext5b", OPRL(0x12,0x55),BASE, ARG_OPRL },
++ { "exthw", OPR(0x10,0x56), BASE, ARG_OPR },
++ { "exthw", OPRL(0x12,0x56),BASE, ARG_OPRL },
++ { "ext6b", OPR(0x10,0x56), BASE, ARG_OPR },
++ { "ext6b", OPRL(0x12,0x56),BASE, ARG_OPRL },
++ { "exthl", OPR(0x10,0x57), BASE, ARG_OPR },
++ { "exthl", OPRL(0x12,0x57),BASE, ARG_OPRL },
++ { "ext7b", OPR(0x10,0x57), BASE, ARG_OPR },
++ { "ext7b", OPRL(0x12,0x57),BASE, ARG_OPRL },
++ { "ctpop", OPR(0x10,0x58), BASE, ARG_OPRZ1 },
++ { "ctlz", OPR(0x10,0x59), BASE, ARG_OPRZ1 },
++ { "cttz", OPR(0x10,0x5A), BASE, ARG_OPRZ1 },
++ { "masklb", OPR(0x10,0x60), BASE, ARG_OPR },
++ { "masklb", OPRL(0x12,0x60),BASE, ARG_OPRL },
++ { "mask0b", OPR(0x10,0x60), BASE, ARG_OPR },
++ { "mask0b", OPRL(0x12,0x60),BASE, ARG_OPRL },
++ { "masklh", OPR(0x10,0x61), BASE, ARG_OPR },
++ { "masklh", OPRL(0x12,0x61),BASE, ARG_OPRL },
++ { "mask1b", OPR(0x10,0x61), BASE, ARG_OPR },
++ { "mask1b", OPRL(0x12,0x61),BASE, ARG_OPRL },
++ { "masklw", OPR(0x10,0x62), BASE, ARG_OPR },
++ { "masklw", OPRL(0x12,0x62),BASE, ARG_OPRL },
++ { "mask2b", OPR(0x10,0x62), BASE, ARG_OPR },
++ { "mask2b", OPRL(0x12,0x62),BASE, ARG_OPRL },
++ { "maskll", OPR(0x10,0x63), BASE, ARG_OPR },
++ { "maskll", OPRL(0x12,0x63),BASE, ARG_OPRL },
++ { "mask3b", OPR(0x10,0x63), BASE, ARG_OPR },
++ { "mask3b", OPRL(0x12,0x63),BASE, ARG_OPRL },
++ { "maskhb", OPR(0x10,0x64), BASE, ARG_OPR },
++ { "maskhb", OPRL(0x12,0x64),BASE, ARG_OPRL },
++ { "mask4b", OPR(0x10,0x64), BASE, ARG_OPR },
++ { "mask4b", OPRL(0x12,0x64),BASE, ARG_OPRL },
++ { "maskhh", OPR(0x10,0x65), BASE, ARG_OPR },
++ { "maskhh", OPRL(0x12,0x65),BASE, ARG_OPRL },
++ { "mask5b", OPR(0x10,0x65), BASE, ARG_OPR },
++ { "mask5b", OPRL(0x12,0x65),BASE, ARG_OPRL },
++ { "maskhw", OPR(0x10,0x66), BASE, ARG_OPR },
++ { "maskhw", OPRL(0x12,0x66),BASE, ARG_OPRL },
++ { "mask6b", OPR(0x10,0x66), BASE, ARG_OPR },
++ { "mask6b", OPRL(0x12,0x66),BASE, ARG_OPRL },
++ { "maskhl", OPR(0x10,0x67), BASE, ARG_OPR },
++ { "maskhl", OPRL(0x12,0x67),BASE, ARG_OPRL },
++ { "mask7b", OPR(0x10,0x67), BASE, ARG_OPR },
++ { "mask7b", OPRL(0x12,0x67),BASE, ARG_OPRL },
++ { "zap", OPR(0x10,0x68), BASE, ARG_OPR },
++ { "zap", OPRL(0x12,0x68),BASE, ARG_OPRL },
++ { "zapnot", OPR(0x10,0x69), BASE, ARG_OPR },
++ { "zapnot", OPRL(0x12,0x69),BASE, ARG_OPRL },
++ { "sextb", OPR(0x10,0x6A), BASE, ARG_OPRZ1},
++ { "sextb", OPRL(0x12,0x6A),BASE, ARG_OPRLZ1 },
++ { "sexth", OPR(0x10,0x6B), BASE, ARG_OPRZ1 },
++ { "sexth", OPRL(0x12,0x6B),BASE, ARG_OPRLZ1 },
++ { "cmpgeb", OPR(0x10,0x6C), BASE, ARG_OPR },
++ { "cmpgeb", OPRL(0x12,0x6C),BASE, ARG_OPRL },
++ { "fimovs", OPR(0x10,0x70), BASE, { FA, ZB, RC } },
++ { "fimovd", OPR(0x10,0x78), BASE, { FA, ZB, RC } },
++ { "ftoid", OPR(0x10,0x78), BASE, { FA, ZB, RC } },
++ { "seleq", TOPR(0x11,0x0), BASE, ARG_TOPR },
++ { "seleq", TOPRL(0x13,0x0),BASE, ARG_TOPRL },
++ { "selge", TOPR(0x11,0x1), BASE, ARG_TOPR },
++ { "selge", TOPRL(0x13,0x1),BASE, ARG_TOPRL },
++ { "selgt", TOPR(0x11,0x2), BASE, ARG_TOPR },
++ { "selgt", TOPRL(0x13,0x2),BASE, ARG_TOPRL },
++ { "selle", TOPR(0x11,0x3), BASE, ARG_TOPR },
++ { "selle", TOPRL(0x13,0x3),BASE, ARG_TOPRL },
++ { "sellt", TOPR(0x11,0x4), BASE, ARG_TOPR },
++ { "sellt", TOPRL(0x13,0x4),BASE, ARG_TOPRL },
++ { "selne", TOPR(0x11,0x5), BASE, ARG_TOPR },
++ { "selne", TOPRL(0x13,0x5),BASE, ARG_TOPRL },
++ { "sellbc", TOPR(0x11,0x6), BASE, ARG_TOPR },
++ { "sellbc", TOPRL(0x13,0x6),BASE, ARG_TOPRL },
++ { "sellbs", TOPR(0x11,0x7), BASE, ARG_TOPR },
++ { "sellbs", TOPRL(0x13,0x7),BASE, ARG_TOPRL },
++ { "vlog", LOGX(0x14,0x00), BASE, ARG_FMA },
++
++ { "vbicw", PSE_LOGX(0x14,0x30), BASE, { FA , FB , DFC1 } },
++ { "vxorw", PSE_LOGX(0x14,0x3c), BASE, { FA , FB , DFC1 } },
++ { "vandw", PSE_LOGX(0x14,0xc0), BASE, { FA , FB , DFC1 } },
++ { "veqvw", PSE_LOGX(0x14,0xc3), BASE, { FA , FB , DFC1 } },
++ { "vornotw", PSE_LOGX(0x14,0xf3), BASE, { FA , FB , DFC1 } },
++ { "vbisw", PSE_LOGX(0x14,0xfc), BASE, { FA , FB , DFC1 } },
++
++ { "fadds", FP(0x18,0x00), BASE, ARG_FP },
++ { "faddd", FP(0x18,0x01), BASE, ARG_FP },
++ { "fsubs", FP(0x18,0x02), BASE, ARG_FP },
++ { "fsubd", FP(0x18,0x03), BASE, ARG_FP },
++ { "fmuls", FP(0x18,0x04), BASE, ARG_FP },
++ { "fmuld", FP(0x18,0x05), BASE, ARG_FP },
++ { "fdivs", FP(0x18,0x06), BASE, ARG_FP },
++ { "fdivd", FP(0x18,0x07), BASE, ARG_FP },
++ { "fsqrts", FP(0x18,0x08), BASE, ARG_FPZ1 },
++ { "fsqrtd", FP(0x18,0x09), BASE, ARG_FPZ1 },
++ { "fcmpeq", FP(0x18,0x10), BASE, ARG_FP },
++ { "fcmple", FP(0x18,0x11), BASE, ARG_FP },
++ { "fcmplt", FP(0x18,0x12), BASE, ARG_FP },
++ { "fcmpun", FP(0x18,0x13), BASE, ARG_FP },
++
++ { "fcvtsd", FP(0x18,0x20), BASE, ARG_FPZ1 },
++ { "fcvtds", FP(0x18,0x21), BASE, ARG_FPZ1 },
++ { "fcvtdl_g", FP(0x18,0x22), BASE, ARG_FPZ1 },
++ { "fcvtdl_p", FP(0x18,0x23), BASE, ARG_FPZ1 },
++ { "fcvtdl_z", FP(0x18,0x24), BASE, ARG_FPZ1 },
++ { "fcvtdl_n", FP(0x18,0x25), BASE, ARG_FPZ1 },
++ { "fcvtdl", FP(0x18,0x27), BASE, ARG_FPZ1 },
++ { "fcvtwl", FP(0x18,0x28), BASE, ARG_FPZ1 },
++ { "fcvtlw", FP(0x18,0x29), BASE, ARG_FPZ1 },
++ { "fcvtls", FP(0x18,0x2d), BASE, ARG_FPZ1 },
++ { "fcvtld", FP(0x18,0x2f), BASE, ARG_FPZ1 },
++
++ { "fnop", FP(0x18,0x030), BASE, { ZA, ZB, ZC } },
++ { "fclr", FP(0x18,0x030), BASE, { ZA, ZB, FC } },
++ { "fabs", FP(0x18,0x030), BASE, ARG_FPZ1 },
++ { "fcpys", FP(0x18,0x30), BASE, ARG_FP },
++ { "fmov", FP(0x18,0x30), BASE, { FA, RBA, FC } },
++ { "fcpyse", FP(0x18,0x31), BASE, ARG_FP },
++ { "fneg", FP(0x18,0x32), BASE, { FA, RBA, FC } },
++ { "fcpysn", FP(0x18,0x32), BASE, ARG_FP },
++
++ { "ifmovs", FP(0x18,0x40), BASE, { RA, ZB, FC } },
++ { "ifmovd", FP(0x18,0x41), BASE, { RA, ZB, FC } },
++ { "itofd", FP(0x18,0x41), BASE, { RA, ZB, FC } },
++
++ { "rfpcr", FP(0x18,0x50), BASE, { FA, RBA, RCA } },
++ { "wfpcr", FP(0x18,0x51), BASE, { FA, RBA, RCA } },
++ { "setfpec0", FP(0x18,0x54), BASE, ARG_NONE },
++ { "setfpec1", FP(0x18,0x55), BASE, ARG_NONE },
++ { "setfpec2", FP(0x18,0x56), BASE, ARG_NONE },
++ { "setfpec3", FP(0x18,0x57), BASE, ARG_NONE },
++ { "fmas", FMA(0x19,0x00), BASE, ARG_FMA },
++ { "fmad", FMA(0x19,0x01), BASE, ARG_FMA },
++ { "fmss", FMA(0x19,0x02), BASE, ARG_FMA },
++ { "fmsd", FMA(0x19,0x03), BASE, ARG_FMA },
++ { "fnmas", FMA(0x19,0x04), BASE, ARG_FMA },
++ { "fnmad", FMA(0x19,0x05), BASE, ARG_FMA },
++ { "fnmss", FMA(0x19,0x06), BASE, ARG_FMA },
++ { "fnmsd", FMA(0x19,0x07), BASE, ARG_FMA },
++
++/* fcmov* is not needed on sw64, and fsel* -> fcmov* differ in operand count, so it cannot be replaced directly.  The default FD is the same as FC, not FA.  */
++ { "fseleq", FMA(0x19,0x10), BASE, ARG_FCMOV },
++ { "fselne", FMA(0x19,0x11), BASE, ARG_FCMOV },
++ { "fsellt", FMA(0x19,0x12), BASE, ARG_FCMOV },
++ { "fselle", FMA(0x19,0x13), BASE, ARG_FCMOV },
++ { "fselgt", FMA(0x19,0x14), BASE, ARG_FCMOV },
++ { "fselge", FMA(0x19,0x15), BASE, ARG_FCMOV },
++
++ { "vaddw", FP(0x1A,0x00), BASE, ARG_FP },
++ { "vaddw", FP(0x1A,0x20), BASE, ARG_FPL },
++ { "vsubw", FP(0x1A,0x01), BASE, ARG_FP },
++ { "vsubw", FP(0x1A,0x21), BASE, ARG_FPL },
++ { "vcmpgew", FP(0x1A,0x02), BASE, ARG_FP },
++ { "vcmpgew", FP(0x1A,0x22), BASE, ARG_FPL },
++ { "vcmpeqw", FP(0x1A,0x03), BASE, ARG_FP },
++ { "vcmpeqw", FP(0x1A,0x23), BASE, ARG_FPL },
++ { "vcmplew", FP(0x1A,0x04), BASE, ARG_FP },
++ { "vcmplew", FP(0x1A,0x24), BASE, ARG_FPL },
++ { "vcmpltw", FP(0x1A,0x05), BASE, ARG_FP },
++ { "vcmpltw", FP(0x1A,0x25), BASE, ARG_FPL },
++ { "vcmpulew", FP(0x1A,0x06), BASE, ARG_FP },
++ { "vcmpulew", FP(0x1A,0x26), BASE, ARG_FPL },
++ { "vcmpultw", FP(0x1A,0x07), BASE, ARG_FP },
++ { "vcmpultw", FP(0x1A,0x27), BASE, ARG_FPL },
++
++ { "vsllw", FP(0x1A,0x08), BASE, ARG_FP },
++ { "vsllw", FP(0x1A,0x28), BASE, ARG_FPL },
++ { "vsrlw", FP(0x1A,0x09), BASE, ARG_FP },
++ { "vsrlw", FP(0x1A,0x29), BASE, ARG_FPL },
++ { "vsraw", FP(0x1A,0x0A), BASE, ARG_FP },
++ { "vsraw", FP(0x1A,0x2A), BASE, ARG_FPL },
++ { "vrolw", FP(0x1A,0x0B), BASE, ARG_FP },
++ { "vrolw", FP(0x1A,0x2B), BASE, ARG_FPL },
++ { "sllow", FP(0x1A,0x0C), BASE, ARG_FP },
++ { "sllow", FP(0x1A,0x2C), BASE, ARG_FPL },
++ { "srlow", FP(0x1A,0x0D), BASE, ARG_FP },
++ { "srlow", FP(0x1A,0x2D), BASE, ARG_FPL },
++ { "vaddl", FP(0x1A,0x0E), BASE, ARG_FP },
++ { "vaddl", FP(0x1A,0x2E), BASE, ARG_FPL },
++ { "vsubl", FP(0x1A,0x0F), BASE, ARG_FP },
++ { "vsubl", FP(0x1A,0x2F), BASE, ARG_FPL },
++ { "ctpopow", FP(0x1A,0x18), BASE, { FA, ZB, DFC1 } },
++ { "ctlzow", FP(0x1A,0x19), BASE, { FA, ZB, DFC1 } },
++ { "vucaddw", FP(0x1A,0x40), BASE, ARG_FP },
++ { "vucaddw", FP(0x1A,0x60), BASE, ARG_FPL },
++ { "vucsubw", FP(0x1A,0x41), BASE, ARG_FP },
++ { "vucsubw", FP(0x1A,0x61), BASE, ARG_FPL },
++ { "vucaddh", FP(0x1A,0x42), BASE, ARG_FP },
++ { "vucaddh", FP(0x1A,0x62), BASE, ARG_FPL },
++ { "vucsubh", FP(0x1A,0x43), BASE, ARG_FP },
++ { "vucsubh", FP(0x1A,0x63), BASE, ARG_FPL },
++ { "vucaddb", FP(0x1A,0x44), BASE, ARG_FP },
++ { "vucaddb", FP(0x1A,0x64), BASE, ARG_FPL },
++ { "vucsubb", FP(0x1A,0x45), BASE, ARG_FP },
++ { "vucsubb", FP(0x1A,0x65), BASE, ARG_FPL },
++ { "vadds", FP(0x1A,0x80), BASE, ARG_FP },
++ { "v4adds", FP(0x1A,0x80), BASE, ARG_FP },/* pseudo SW6 SIMD*/
++ { "vaddd", FP(0x1A,0x81), BASE, ARG_FP },
++ { "v4addd", FP(0x1A,0x81), BASE, ARG_FP },/* pseudo SW6 SIMD*/
++ { "vsubs", FP(0x1A,0x82), BASE, ARG_FP },
++ { "v4subs", FP(0x1A,0x82), BASE, ARG_FP },/* pseudo SW6 SIMD*/
++ { "vsubd", FP(0x1A,0x83), BASE, ARG_FP },
++ { "v4subd", FP(0x1A,0x83), BASE, ARG_FP },/* pseudo SW6 SIMD*/
++ { "vmuls", FP(0x1A,0x84), BASE, ARG_FP },
++ { "v4muls", FP(0x1A,0x84), BASE, ARG_FP },/* pseudo SW6 SIMD*/
++ { "vmuld", FP(0x1A,0x85), BASE, ARG_FP },
++ { "v4muld", FP(0x1A,0x85), BASE, ARG_FP },/* pseudo SW6 SIMD*/
++ { "vdivs", FP(0x1A,0x86), BASE, ARG_FP },
++ { "vdivd", FP(0x1A,0x87), BASE, ARG_FP },
++ { "vsqrts", FP(0x1A,0x88), BASE, ARG_FPZ1 },
++ { "vsqrtd", FP(0x1A,0x89), BASE, ARG_FPZ1 },
++ { "vfcmpeq", FP(0x1A,0x8C), BASE, ARG_FP },
++ { "vfcmple", FP(0x1A,0x8D), BASE, ARG_FP },
++ { "vfcmplt", FP(0x1A,0x8E), BASE, ARG_FP },
++ { "vfcmpun", FP(0x1A,0x8F), BASE, ARG_FP },
++ { "vcpys", FP(0x1A,0x90), BASE, ARG_FP },
++ { "vfmov", FP(0x1A,0x90), BASE, { FA, RBA, FC } }, //V1.1
++ { "vcpyse", FP(0x1A,0x91), BASE, ARG_FP }, // SW6 1.0
++ { "vcpysn", FP(0x1A,0x92), BASE, ARG_FP }, // SW6 1.0
++ { "vmas", FMA(0x1B,0x00), BASE, ARG_FMA },
++ { "vmad", FMA(0x1B,0x01), BASE, ARG_FMA },
++ { "vmss", FMA(0x1B,0x02), BASE, ARG_FMA },
++ { "vmsd", FMA(0x1B,0x03), BASE, ARG_FMA },
++ { "vnmas", FMA(0x1B,0x04), BASE, ARG_FMA },
++ { "vnmad", FMA(0x1B,0x05), BASE, ARG_FMA },
++ { "vnmss", FMA(0x1B,0x06), BASE, ARG_FMA },
++ { "vnmsd", FMA(0x1B,0x07), BASE, ARG_FMA },
++ { "vfseleq", FMA(0x1B,0x10), BASE, ARG_FMA },
++ { "vfsellt", FMA(0x1B,0x12), BASE, ARG_FMA },
++ { "vfselle", FMA(0x1B,0x13), BASE, ARG_FMA },
++ { "vseleqw", FMA(0x1B,0x18), BASE, ARG_FMA },
++ { "vseleqw", FMA(0x1B,0x38), BASE, ARG_FMAL },
++ { "vsellbcw", FMA(0x1B,0x19), BASE, ARG_FMA },
++ { "vsellbcw", FMA(0x1B,0x39), BASE, ARG_FMAL },
++ { "vselltw", FMA(0x1B,0x1A), BASE, ARG_FMA },
++ { "vselltw", FMA(0x1B,0x3A), BASE, ARG_FMAL },
++ { "vsellew", FMA(0x1B,0x1B), BASE, ARG_FMA },
++ { "vsellew", FMA(0x1B,0x3B), BASE, ARG_FMAL },
++ { "vinsw", FMA(0x1B,0x20), BASE, ARG_FMAL },
++ { "vinsf", FMA(0x1B,0x21), BASE, ARG_FMAL },
++ { "vextw", FMA(0x1B,0x22), BASE, { FA, FMALIT, DFC1 }},
++ { "vextf", FMA(0x1B,0x23), BASE, { FA, FMALIT, DFC1 }},
++ { "vcpyw", FMA(0x1B,0x24), BASE, { FA, DFC1 }},
++ { "vcpyf", FMA(0x1B,0x25), BASE, { FA, DFC1 }},
++ { "vconw", FMA(0x1B,0x26), BASE, ARG_FMA },
++ { "vshfw", FMA(0x1B,0x27), BASE, ARG_FMA },
++ { "vcons", FMA(0x1B,0x28), BASE, ARG_FMA },
++ { "vcond", FMA(0x1B,0x29), BASE, ARG_FMA },
++ { "vldw_u", ATMEM(0x1C,0x0), BASE, ARG_VUAMEM },
++ { "vstw_u", ATMEM(0x1C,0x1), BASE, ARG_VUAMEM },
++ { "vlds_u", ATMEM(0x1C,0x2), BASE, ARG_VUAMEM },
++ { "vsts_u", ATMEM(0x1C,0x3), BASE, ARG_VUAMEM },
++ { "vldd_u", ATMEM(0x1C,0x4), BASE, ARG_VUAMEM },
++ { "vstd_u", ATMEM(0x1C,0x5), BASE, ARG_VUAMEM },
++ { "vstw_ul", ATMEM(0x1C,0x8), BASE, ARG_VUAMEM },
++ { "vstw_uh", ATMEM(0x1C,0x9), BASE, ARG_VUAMEM },
++ { "vsts_ul", ATMEM(0x1C,0xA), BASE, ARG_VUAMEM },
++ { "vsts_uh", ATMEM(0x1C,0xB), BASE, ARG_VUAMEM },
++ { "vstd_ul", ATMEM(0x1C,0xC), BASE, ARG_VUAMEM },
++ { "vstd_uh", ATMEM(0x1C,0xD), BASE, ARG_VUAMEM },
++ { "vldd_nc", ATMEM(0x1C,0xE), BASE, ARG_VUAMEM },
++ { "vstd_nc", ATMEM(0x1C,0xF), BASE, ARG_VUAMEM },
++ { "flushd", MEM(0x20), BASE, ARG_PREFETCH },
++ { "ldbu", MEM(0x20), BASE, ARG_MEM },
++ { "evictdg", MEM(0x21), BASE, ARG_PREFETCH },
++ { "ldhu", MEM(0x21), BASE, ARG_MEM },
++ { "s_fillcs", MEM(0x22), BASE, ARG_PREFETCH },
++ { "ldw", MEM(0x22), BASE, ARG_MEM },
++ { "wh64", MFC(0x22,0xF800), BASE, { ZA, PRB } },
++ { "s_fillde", MEM(0x23), BASE, ARG_PREFETCH },
++ { "ldl", MEM(0x23), BASE, ARG_MEM },
++ { "evictdl", MEM(0x24), BASE, ARG_PREFETCH },
++ { "ldl_u", MEM(0x24), BASE, ARG_MEM },
++ { "pri_ldw/p", SW6HWMEM(0x25,0x0), BASE, ARG_SW6HWMEM },
++ { "pri_ldw_inc/p", SW6HWMEM(0x25,0x2), BASE, ARG_SW6HWMEM },
++ { "pri_ldw_dec/p", SW6HWMEM(0x25,0x4), BASE, ARG_SW6HWMEM },
++ { "pri_ldw_set/p", SW6HWMEM(0x25,0x6), BASE, ARG_SW6HWMEM },
++ { "pri_ldw/v", SW6HWMEM(0x25,0x8), BASE, ARG_SW6HWMEM },
++ { "pri_ldw/vpte", SW6HWMEM(0x25,0xA), BASE, ARG_SW6HWMEM },
++ { "pri_ldl/p", SW6HWMEM(0x25,0x1), BASE, ARG_SW6HWMEM },
++ { "pri_ldl_inc/p", SW6HWMEM(0x25,0x3), BASE, ARG_SW6HWMEM },
++ { "pri_ldl_dec/p", SW6HWMEM(0x25,0x5), BASE, ARG_SW6HWMEM },
++ { "pri_ldl_set/p", SW6HWMEM(0x25,0x7), BASE, ARG_SW6HWMEM },
++ { "pri_ldl/v", SW6HWMEM(0x25,0x9), BASE, ARG_SW6HWMEM },
++ { "pri_ldl/vpte", SW6HWMEM(0x25,0xB), BASE, ARG_SW6HWMEM },
++ { "fillde", MEM(0x26), BASE, ARG_PREFETCH },
++ { "flds", MEM(0x26), BASE, ARG_FMEM },
++ { "fillde_e", MEM(0x27), BASE, ARG_PREFETCH },
++ { "fldd", MEM(0x27), BASE, ARG_FMEM },
++
++ { "stb", MEM(0x28), BASE, ARG_MEM },
++ { "sth", MEM(0x29), BASE, ARG_MEM },
++ { "stw", MEM(0x2A), BASE, ARG_MEM },
++ { "stl", MEM(0x2B), BASE, ARG_MEM },
++ { "stl_u", MEM(0x2C), BASE, ARG_MEM },
++ { "pri_stw/p", SW6HWMEM(0x2D,0x0), BASE, ARG_SW6HWMEM },
++ { "pri_stw/v", SW6HWMEM(0x2D,0x8), BASE, ARG_SW6HWMEM },
++ { "pri_stl/p", SW6HWMEM(0x2D,0x1), BASE, ARG_SW6HWMEM },
++ { "pri_stl/v", SW6HWMEM(0x2D,0x9), BASE, ARG_SW6HWMEM },
++ { "fsts", MEM(0x2E), BASE, ARG_FMEM },
++ { "fstd", MEM(0x2F), BASE, ARG_FMEM },
++ { "beq", BRA(0x30), BASE, ARG_BRA },
++ { "bne", BRA(0x31), BASE, ARG_BRA },
++ { "blt", BRA(0x32), BASE, ARG_BRA },
++ { "ble", BRA(0x33), BASE, ARG_BRA },
++ { "bgt", BRA(0x34), BASE, ARG_BRA },
++ { "bge", BRA(0x35), BASE, ARG_BRA },
++ { "blbc", BRA(0x36), BASE, ARG_BRA },
++ { "blbs", BRA(0x37), BASE, ARG_BRA },
++
++ { "fbeq", BRA(0x38), BASE, ARG_FBRA },
++ { "fbne", BRA(0x39), BASE, ARG_FBRA },
++ { "fblt", BRA(0x3A), BASE, ARG_FBRA },
++ { "fble", BRA(0x3B), BASE, ARG_FBRA },
++ { "fbgt", BRA(0x3C), BASE, ARG_FBRA },
++ { "fbge", BRA(0x3D), BASE, ARG_FBRA },
++ { "ldi", MEM(0x3E), BASE, { RA, MDISP, ZB } },
++ { "ldi", MEM(0x3E), BASE, ARG_MEM },
++ { "ldih", MEM(0x3F), BASE, { RA, MDISP, ZB } },
++ { "ldih", MEM(0x3F), BASE, ARG_MEM },
++ { "unop", MEM_(0x3F) | (30 << 16), MEM_MASK, BASE , { ZA } },
++};
++
++const unsigned sw64_num_opcodes = sizeof(sw64_opcodes)/sizeof(*sw64_opcodes);
diff --git a/gdb-14.1-add-support-for-SW64-004.patch b/gdb-14.1-add-support-for-SW64-004.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2647c23339ae54acf06564b8881220696291a1b8
--- /dev/null
+++ b/gdb-14.1-add-support-for-SW64-004.patch
@@ -0,0 +1,1959 @@
+diff -Naur gdb-14.1-after-patch/config.guess gdb-14.1-sw64/config.guess
+--- gdb-14.1-after-patch/config.guess 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/config.guess 2025-03-03 10:59:13.020000000 +0800
+@@ -1158,6 +1158,9 @@
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ GUESS=$UNAME_MACHINE-unknown-linux-$LIBC
+ ;;
++ sw_64:Linux:*:*)
++ GUESS=$UNAME_MACHINE-unknown-linux-$LIBC
++ ;;
+ tile*:Linux:*:*)
+ GUESS=$UNAME_MACHINE-unknown-linux-$LIBC
+ ;;
+diff -Naur gdb-14.1-after-patch/config.sub gdb-14.1-sw64/config.sub
+--- gdb-14.1-after-patch/config.sub 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/config.sub 2025-03-03 10:59:13.020000000 +0800
+@@ -1130,6 +1130,9 @@
+ arm64-* | aarch64le-*)
+ cpu=aarch64
+ ;;
++ sw_64-*)
++ cpu=sw64
++ ;;
+
+ # Recognize the canonical CPU Types that limit and/or modify the
+ # company names they are paired with.
+@@ -1269,6 +1272,7 @@
+ | sparclite \
+ | sparcv8 | sparcv9 | sparcv9b | sparcv9v | sv1 | sx* \
+ | spu \
++ | sw64 \
+ | tahoe \
+ | thumbv7* \
+ | tic30 | tic4x | tic54x | tic55x | tic6x | tic80 \
+diff -Naur gdb-14.1-after-patch/configure.ac gdb-14.1-sw64/configure.ac
+--- gdb-14.1-after-patch/configure.ac 2023-12-03 13:25:53.000000000 +0800
++++ gdb-14.1-sw64/configure.ac 2025-03-03 10:59:13.030000000 +0800
+@@ -363,7 +363,7 @@
+ # Check for target supported by gold.
+ case "${target}" in
+ i?86-*-* | x86_64-*-* | sparc*-*-* | powerpc*-*-* | arm*-*-* \
+- | aarch64*-*-* | tilegx*-*-* | mips*-*-* | s390*-*-* | loongarch*-*-*)
++ | aarch64*-*-* | tilegx*-*-* | mips*-*-* | s390*-*-* | loongarch*-*-* | sw64*-*-*)
+ configdirs="$configdirs gold"
+ if test x${ENABLE_GOLD} = xdefault; then
+ default_ld=gold
+@@ -927,6 +927,9 @@
+ sparc*-*-*)
+ libgloss_dir=sparc
+ ;;
++ sw64*-*-*)
++ libgloss_dir=sw64
++ ;;
+ esac
+
+ # Disable newlib and libgloss for various target OSes.
+@@ -1301,6 +1304,8 @@
+ loongarch*-*-*)
+ noconfigdirs="$noconfigdirs gprof"
+ ;;
++ sw64*-*-linux*)
++ ;;
+ esac
+
+ # If we aren't building newlib, then don't build libgloss, since libgloss
+diff -Naur gdb-14.1-after-patch/gdbserver/configure.srv gdb-14.1-sw64/gdbserver/configure.srv
+--- gdb-14.1-after-patch/gdbserver/configure.srv 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/gdbserver/configure.srv 2025-03-03 10:59:13.690000000 +0800
+@@ -159,6 +159,19 @@
+ srv_linux_regsets=yes
+ srv_linux_thread_db=yes
+ ;;
++ sw64*-*-linux*) srv_regobj="sw64-linux.o"
++ srv_tgtobj="linux-sw64-low.o"
++ srv_tgtobj="${srv_tgtobj} ${srv_linux_obj}"
++ srv_tgtobj="${srv_tgtobj} nat/sw64-linux-watch.o"
++ srv_xmlfiles="sw64-linux.xml"
++ srv_xmlfiles="${srv_xmlfiles} sw64-cpu.xml"
++ srv_xmlfiles="${srv_xmlfiles} sw64-fpu.xml"
++ srv_xmlfiles="${srv_xmlfiles} sw64-efu.xml"
++ srv_xmlfiles="${srv_xmlfiles} sw64-vec.xml"
++ srv_linux_regsets=no
++ srv_linux_usrregs=yes
++ srv_linux_thread_db=yes
++ ;;
+ mips*-*-linux*) srv_regobj="mips-linux.o"
+ srv_regobj="${srv_regobj} mips-dsp-linux.o"
+ srv_regobj="${srv_regobj} mips64-linux.o"
+diff -Naur gdb-14.1-after-patch/gdbserver/linux-sw64-low.cc gdb-14.1-sw64/gdbserver/linux-sw64-low.cc
+--- gdb-14.1-after-patch/gdbserver/linux-sw64-low.cc 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/gdbserver/linux-sw64-low.cc 2025-03-03 10:59:13.700000000 +0800
+@@ -0,0 +1,928 @@
++/* GNU/Linux/SW64 specific low level interface, for the remote server for GDB.
++ Copyright (C) 1995-2023 Free Software Foundation, Inc.
++
++ This file is part of GDB.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program. If not, see . */
++
++#include "server.h"
++#include "linux-low.h"
++
++#include "nat/gdb_ptrace.h"
++#include
++
++#include "nat/sw64-linux-watch.h"
++#include "gdb_proc_service.h"
++
++/* Linux target op definitions for the SW64 architecture. */
++
++class sw64_target : public linux_process_target
++{
++public:
++
++ const regs_info *get_regs_info () override;
++
++ const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
++
++ bool supports_z_point_type (char z_type) override;
++
++protected:
++
++ void low_arch_setup () override;
++
++ bool low_cannot_fetch_register (int regno) override;
++
++ bool low_cannot_store_register (int regno) override;
++
++ bool low_fetch_register (regcache *regcache, int regno) override;
++
++ bool low_supports_breakpoints () override;
++
++ CORE_ADDR low_get_pc (regcache *regcache) override;
++
++ void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
++
++ int low_decr_pc_after_break () override;
++
++ bool low_breakpoint_at (CORE_ADDR pc) override;
++
++ int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
++ int size, raw_breakpoint *bp) override;
++
++ int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
++ int size, raw_breakpoint *bp) override;
++
++ bool low_stopped_by_watchpoint () override;
++
++ CORE_ADDR low_stopped_data_address () override;
++
++#if 0
++ void low_collect_ptrace_register (regcache *regcache, int regno,
++ char *buf) override;
++
++ void low_supply_ptrace_register (regcache *regcache, int regno,
++ const char *buf) override;
++#endif
++
++ arch_process_info *low_new_process () override;
++
++ void low_delete_process (arch_process_info *info) override;
++
++ void low_new_thread (lwp_info *) override;
++
++ void low_delete_thread (arch_lwp_info *) override;
++
++ void low_new_fork (process_info *parent, process_info *child) override;
++
++ void low_prepare_to_resume (lwp_info *lwp) override;
++};
++
++/* The singleton target ops object. */
++
++static sw64_target the_sw64_target;
++
++/* Defined in auto-generated file sw64-linux.c. */
++void init_registers_sw64_linux (void);
++extern const struct target_desc *tdesc_sw64_linux;
++
++#ifndef PTRACE_GET_THREAD_AREA
++#define PTRACE_GET_THREAD_AREA 25
++#endif
++
++#ifdef HAVE_SYS_REG_H
++#include
++#endif
++
++#define sw64_num_regs 165
++
++#include
++#include
++
++union sw64_register
++{
++ unsigned char buf[8];
++ long long reg64;
++};
++
++int gcc_backtrace ();
++void exc_handler (int, siginfo_t*, void*);
++extern int tohex (int nib);
++extern int bin2hex (const gdb_byte *bin, char *hex, int count);
++extern long dva_match_addr;
++extern long status_cg[];
++extern struct lwp_info *add_lwp (ptid_t ptid);
++
++enum sw64_hw_bp_type sw64_hw_bp_type_from_raw_type (enum raw_bkpt_type raw_type);
++
++/* Return the ptrace ``address'' of register REGNO. */
++#if 0
++#define sw64_base_regs \
++ 0, 1, 2, 3, 4, 5, 6, 7, \
++ 8, 9, 10, 11, 12, 13, 14, 15, \
++ 16, 17, 18, 19, 20, 21, 22, 23, \
++ 24, 25, 26, 27, 28, 29, 30, -1, \
++ \
++ FPR_BASE, FPR_BASE + 1, FPR_BASE + 2, FPR_BASE + 3, \
++ FPR_BASE + 4, FPR_BASE + 5, FPR_BASE + 6, FPR_BASE + 7, \
++ FPR_BASE + 8, FPR_BASE + 9, FPR_BASE + 10, FPR_BASE + 11, \
++ FPR_BASE + 12, FPR_BASE + 13, FPR_BASE + 14, FPR_BASE + 15, \
++ FPR_BASE + 16, FPR_BASE + 17, FPR_BASE + 18, FPR_BASE + 19, \
++ FPR_BASE + 20, FPR_BASE + 21, FPR_BASE + 22, FPR_BASE + 23, \
++ FPR_BASE + 24, FPR_BASE + 25, FPR_BASE + 26, FPR_BASE + 27, \
++ FPR_BASE + 28, FPR_BASE + 29, FPR_BASE + 30, FPR_BASE + 31, \
++ \
++ PC, -1, -1
++
++static int mips_regmap[sw64_num_regs] = {
++ sw64_base_regs,
++ 0
++};
++#endif
++
++/*
++ * The following table maps a register index into the stack offset at
++ * which the register is saved. Register indices are 0-31 for integer
++ * regs, 32-63 for fp regs, and 64 for the pc. Notice that sp and
++ * zero have no stack-slot and need to be treated specially (see
++ * get_reg/put_reg below).
++ */
++
++/* Return the ptrace 'address' of register REGNO. */
++#define FPR_BASE 32
++
++/* 0~63 general, 64 pc, 65-69 hwbpt/da_match,70-101 simd */
++static int sw64_regmap[sw64_num_regs] = {
++ 0, 1, 2, 3, 4, 5, 6, 7,
++ 8, 9, 10, 11, 12, 13, 14, 15,
++ 16, 17, 18, 19, 20, 21, 22, 23,
++ 24, 25, 26, 27, 28, 29, 30, 31,
++ FPR_BASE, FPR_BASE + 1, FPR_BASE + 2, FPR_BASE + 3,
++ FPR_BASE + 4, FPR_BASE + 5, FPR_BASE + 6, FPR_BASE + 7,
++ FPR_BASE + 8, FPR_BASE + 9, FPR_BASE + 10, FPR_BASE + 11,
++ FPR_BASE + 12, FPR_BASE + 13, FPR_BASE + 14, FPR_BASE + 15,
++ FPR_BASE + 16, FPR_BASE + 17, FPR_BASE + 18, FPR_BASE + 19,
++ FPR_BASE + 20, FPR_BASE + 21, FPR_BASE + 22, FPR_BASE + 23,
++ FPR_BASE + 24, FPR_BASE + 25, FPR_BASE + 26, FPR_BASE + 27,
++ FPR_BASE + 28, FPR_BASE + 29, FPR_BASE + 30, FPR_BASE + 31,
++ REG_PC, -1, -1, /* NULL and unique */
++ REG_V0F1, REG_V0F1 + 1, REG_V0F1 + 2, REG_V0F1 + 3,
++ REG_V0F1 + 4, REG_V0F1 + 5, REG_V0F1 + 6, REG_V0F1 + 7,
++ REG_V0F1 + 8, REG_V0F1 + 9, REG_V0F1 + 10, REG_V0F1 + 11,
++ REG_V0F1 + 12, REG_V0F1 + 13, REG_V0F1 + 14, REG_V0F1 + 15,
++ REG_V0F1 + 16, REG_V0F1 + 17, REG_V0F1 + 18, REG_V0F1 + 19,
++ REG_V0F1 + 20, REG_V0F1 + 21, REG_V0F1 + 22, REG_V0F1 + 23,
++ REG_V0F1 + 24, REG_V0F1 + 25, REG_V0F1 + 26, REG_V0F1 + 27,
++ REG_V0F1 + 28, REG_V0F1 + 29, REG_V0F1 + 30, REG_V0F1 + 31,
++ REG_V0F2, REG_V0F2 + 1, REG_V0F2 + 2, REG_V0F2 + 3,
++ REG_V0F2 + 4, REG_V0F2 + 5, REG_V0F2 + 6, REG_V0F2 + 7,
++ REG_V0F2 + 8, REG_V0F2 + 9, REG_V0F2 + 10, REG_V0F2 + 11,
++ REG_V0F2 + 12, REG_V0F2 + 13, REG_V0F2 + 14, REG_V0F2 + 15,
++ REG_V0F2 + 16, REG_V0F2 + 17, REG_V0F2 + 18, REG_V0F2 + 19,
++ REG_V0F2 + 20, REG_V0F2 + 21, REG_V0F2 + 22, REG_V0F2 + 23,
++ REG_V0F2 + 24, REG_V0F2 + 25, REG_V0F2 + 26, REG_V0F2 + 27,
++ REG_V0F2 + 28, REG_V0F2 + 29, REG_V0F2 + 30, REG_V0F2 + 31,
++ REG_V0F3, REG_V0F3 + 1, REG_V0F3 + 2, REG_V0F3 + 3,
++ REG_V0F3 + 4, REG_V0F3 + 5, REG_V0F3 + 6, REG_V0F3 + 7,
++ REG_V0F3 + 8, REG_V0F3 + 9, REG_V0F3 + 10, REG_V0F3 + 11,
++ REG_V0F3 + 12, REG_V0F3 + 13, REG_V0F3 + 14, REG_V0F3 + 15,
++ REG_V0F3 + 16, REG_V0F3 + 17, REG_V0F3 + 18, REG_V0F3 + 19,
++ REG_V0F3 + 20, REG_V0F3 + 21, REG_V0F3 + 22, REG_V0F3 + 23,
++ REG_V0F3 + 24, REG_V0F3 + 25, REG_V0F3 + 26, REG_V0F3 + 27,
++ REG_V0F3 + 28, REG_V0F3 + 29, REG_V0F3 + 30, REG_V0F3 + 31,
++ -1,
++ 0
++};
++
++static const struct target_desc *
++sw64_read_description (void)
++{
++ return tdesc_sw64_linux;
++}
++
++void
++sw64_target::low_arch_setup ()
++{
++ current_process ()->tdesc = sw64_read_description ();
++}
++
++static struct usrregs_info *
++get_usrregs_info (void)
++{
++ const struct regs_info *regs_info = the_linux_target->get_regs_info ();
++
++ return regs_info->usrregs;
++}
++
++/* Per-process arch-specific data we want to keep. */
++
++struct arch_process_info
++{
++#if 0
++ /* -1 if the kernel and/or CPU do not support watch registers.
++ 1 if watch_readback is valid and we can read style, num_valid
++ and the masks.
++ 0 if we need to read the watch_readback. */
++
++ int watch_readback_valid;
++
++ /* Cached watch register read values. */
++
++ struct pt_watch_regs watch_readback;
++
++ /* Current watchpoint requests for this process. */
++
++ struct mips_watchpoint *current_watches;
++
++ /* The current set of watch register values for writing the
++ registers. */
++
++ struct pt_watch_regs watch_mirror;
++#endif
++};
++
++/* Per-thread arch-specific data we want to keep. */
++#if 0
++struct arch_lwp_info
++{
++ /* Non-zero if our copy differs from what's recorded in the thread. */
++ int watch_registers_changed;
++};
++#endif
++
++/* From sw64-linux-nat.c. */
++
++/* Pseudo registers can not be read. ptrace does not provide a way to
++ read (or set) PS_REGNUM, and there's no point in reading or setting
++ ZERO_REGNUM, it's always 0. We also can not set BADVADDR, CAUSE,
++ or FCRIR via ptrace(). */
++
++bool
++sw64_target::low_cannot_fetch_register (int regno)
++{
++ const struct target_desc *tdesc;
++ tdesc = current_process ()->tdesc;
++
++ if (get_usrregs_info ()->regmap[regno] == -1)
++ return true;
++
++ if (get_regs_info ()->usrregs->regmap[regno] == -1)
++ return true;
++
++ if (find_regno (tdesc, "r31") == regno)
++ return true;
++
++ return false;
++}
++
++bool
++sw64_target::low_cannot_store_register (int regno)
++{
++ const struct target_desc *tdesc;
++ tdesc = current_process ()->tdesc;
++
++ if (get_usrregs_info ()->regmap[regno] == -1)
++ return true;
++
++ if (get_regs_info ()->usrregs->regmap[regno] == -1)
++ return true;
++
++ if (find_regno (tdesc, "r31") == regno)
++ return true;
++
++ return false;
++}
++
++bool
++sw64_target::low_fetch_register (regcache *regcache, int regno)
++{
++ const struct target_desc *tdesc = current_process ()->tdesc;
++
++ if (find_regno (tdesc, "r31") == regno)
++ {
++ supply_register_zeroed (regcache, regno);
++ return true;
++ }
++
++ return false;
++}
++
++bool
++sw64_target::low_supports_breakpoints ()
++{
++ return true;
++}
++
++CORE_ADDR
++sw64_target::low_get_pc (regcache *regcache)
++{
++ union sw64_register pc;
++ collect_register_by_name (regcache, "pc", pc.buf);
++ return pc.reg64;
++}
++
++void
++sw64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
++{
++ union sw64_register newpc;
++ newpc.reg64 = pc;
++
++ supply_register_by_name (regcache, "pc", newpc.buf);
++}
++
++int
++sw64_target::low_decr_pc_after_break ()
++{
++ return 4;
++}
++
++/* Correct in either endianness. */
++static const unsigned int sw64_breakpoint = 0x00000080;
++#define sw64_breakpoint_len 4
++
++/* Implementation of target ops method "sw_breakpoint_from_kind". */
++
++const gdb_byte *
++sw64_target::sw_breakpoint_from_kind (int kind, int *size)
++{
++ *size = sw64_breakpoint_len;
++ return (const gdb_byte *) &sw64_breakpoint;
++}
++
++bool
++sw64_target::low_breakpoint_at (CORE_ADDR where)
++{
++ unsigned int insn;
++
++ read_memory (where, (unsigned char *) &insn, 4);
++ if (insn == sw64_breakpoint)
++ return true;
++
++ /* If necessary, recognize more trap instructions here. GDB only uses the
++ one. */
++ return false;
++}
++
++/* Mark the watch registers of lwp, represented by ENTRY, as changed. */
++
++static void
++update_watch_registers_callback (thread_info *thread)
++{
++ struct lwp_info *lwp = get_thread_lwp (thread);
++
++ /* The actual update is done later just before resuming the lwp,
++ we just mark that the registers need updating. */
++ lwp->arch_private->watch_registers_changed = 1;
++
++ /* If the lwp isn't stopped, force it to momentarily pause, so
++ we can update its watch registers. */
++ if (!lwp->stopped)
++ linux_stop_lwp (lwp);
++}
++
++/* This is the implementation of linux target ops method
++ low_new_process. */
++
++arch_process_info *
++sw64_target::low_new_process ()
++{
++ struct arch_process_info *info = XCNEW (struct arch_process_info);
++
++ return info;
++}
++
++/* This is the implementation of linux target ops method
++ low_delete_process. */
++
++void
++sw64_target::low_delete_process (arch_process_info *info)
++{
++ xfree (info);
++}
++
++/* This is the implementation of linux target ops method low_new_thread.
++ Mark the watch registers as changed, so the threads' copies will
++ be updated. */
++
++void
++sw64_target::low_new_thread (lwp_info *lwp)
++{
++ struct arch_lwp_info *info = XCNEW (struct arch_lwp_info);
++
++ info->watch_registers_changed = 1;
++
++ lwp->arch_private = info;
++}
++
++/* Function to call when a thread is being deleted. */
++
++void
++sw64_target::low_delete_thread (arch_lwp_info *arch_lwp)
++{
++ xfree (arch_lwp);
++}
++
++/* Create a new sw64_watchpoint and add it to the list. */
++
++
++/* Hook to call when a new fork is attached. */
++
++void
++sw64_target::low_new_fork (process_info *parent,
++ process_info *child)
++{
++}
++
++enum sw64_hw_bp_type
++sw64_hw_bp_type_from_raw_type (enum raw_bkpt_type raw_type)
++{
++ switch (raw_type)
++ {
++ case raw_bkpt_type_hw:
++ return sw64_none;
++ case raw_bkpt_type_write_wp:
++ return sw64_write;
++ case raw_bkpt_type_read_wp:
++ return sw64_read;
++ case raw_bkpt_type_access_wp:
++ return sw64_access;
++ case raw_bkpt_type_value_wp:
++ return sw64_vstore;
++ default:
++ internal_error ("bad raw breakpoint type %d", (int) raw_type);
++ }
++}
++
++/* This is the implementation of linux target ops method
++ low_prepare_to_resume. If the watch regs have changed, update the
++ thread's copies. */
++
++void
++sw64_target::low_prepare_to_resume (lwp_info *lwp)
++{
++ int i = 0;
++ ptid_t ptid = ptid_of (get_lwp_thread (lwp));
++ //struct process_info *proc = find_process_pid (ptid.pid ());
++ //struct arch_process_info *priv = proc->priv->arch_private;
++ struct arch_lwp_info *priv = lwp->arch_private;
++ pid_t lwpid = ptid.lwp ();
++
++ if (!(lwp->arch_private->watch_registers_changed))
++ return;
++
++ if ( priv->wpt[1].valid )
++ {
++ //debug("write master dv_match %#lx, mask %#lx", priv->wpt[1].match, priv->wpt[1].mask);
++ store_debug_register (lwpid, M_DV_MATCH, priv->wpt[1].match);
++ store_debug_register (lwpid, M_DV_MATCH+1, priv->wpt[1].mask);
++ }
++ if ( priv->wpt[0].valid )
++ {
++ //debug("write master da_match %#lx, mask %#lx", priv->wpt->match, priv->wpt->mask);
++ store_debug_register (lwpid, M_DA_MATCH, priv->wpt->match);
++ store_debug_register (lwpid, M_DA_MASK, priv->wpt->mask);
++ }
++
++ i = (priv->wpt[1].valid<<1) | priv->wpt[0].valid;
++
++ // setting dv_ctl
++ switch (i)
++ {
++ //da_match
++ case 0:
++ case 1:
++ //store_debug_register (lwpid, M_DV_MATCH+2, 0L);
++ break;
++ //dv_match
++ case 2:
++ store_debug_register (lwpid, M_DV_MATCH+2, 1);
++ break;
++ //dva_match
++ case 3:
++ store_debug_register (lwpid, M_DV_MATCH+2, 3);
++ break;
++ default:
++ ;;
++ }
++
++ lwp->arch_private->watch_registers_changed = 0;
++
++#if 0
++ if (lwp->arch_private->watch_registers_changed)
++ {
++ /* Only update the watch registers if we have set or unset a
++ watchpoint already. */
++ if (sw64_linux_watch_get_num_valid (&priv->watch_mirror) > 0)
++ {
++ /* Write the mirrored watch register values. */
++ int tid = ptid.lwp ();
++
++ if (-1 == ptrace (PTRACE_SET_WATCH_REGS, tid,
++ &priv->watch_mirror, NULL))
++ perror_with_name ("Couldn't write watch register");
++ }
++
++ lwp->arch_private->watch_registers_changed = 0;
++ }
++#endif
++}
++
++bool
++sw64_target::supports_z_point_type (char z_type)
++{
++ switch (z_type)
++ {
++ case Z_PACKET_WRITE_WP:
++ case Z_PACKET_READ_WP:
++ case Z_PACKET_ACCESS_WP:
++ case Z_PACKET_SW_BP:
++ case Z_PACKET_HW_BP:
++ case Z_PACKET_DV_WP:
++ return true;
++ default:
++ return false;
++ }
++}
++
++/* This is the implementation of linux target ops method
++ low_insert_point. */
++
++int
++sw64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
++ int len, raw_breakpoint *bp)
++{
++ int pid;
++ long lwpid;
++ enum sw64_hw_bp_type watch_type;
++ struct process_info *proc = current_process ();
++ struct lwp_info *lwp = get_thread_lwp (current_thread);
++ struct arch_lwp_info *priv = lwp->arch_private;
++
++ if (!addr)
++ return 0;
++
++ lwpid = lwpid_of (current_thread);
++ pid = pid_of (current_thread);
++
++ if ( len <= 0 || !is_power_of_2 (len))
++ return 0;
++
++ /* Now try to add the new watch. */
++ watch_type = sw64_hw_bp_type_from_raw_type(type);
++
++ if (sw64_linux_try_one_watch(lwpid, priv, watch_type,addr,len))
++ //find_inferior (&all_threads, update_watch_registers_callback, &pid);
++ /* Only update the threads of this process. */
++ for_each_thread (pid, update_watch_registers_callback);
++
++ return 0;
++
++ /* Only update the threads of this process. */
++ for_each_thread (proc->pid, update_watch_registers_callback);
++
++ return 0;
++}
++
++/* This is the implementation of linux target ops method
++ low_remove_point. */
++
++int
++sw64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
++ int len, raw_breakpoint *bp)
++{
++ int pid;
++ //uint64_t match;
++ //uint64_t data;
++ enum sw64_hw_bp_type watch_type;
++ long lwpid;
++ struct lwp_info *lwp = get_thread_lwp (current_thread);
++ struct arch_lwp_info *priv = lwp->arch_private;
++
++ /* Search for a known watch that matches. Then unlink and free it. */
++ watch_type = sw64_hw_bp_type_from_raw_type(type);
++
++ pid = pid_of (current_thread);
++ lwpid = lwpid_of(current_thread);
++ if (!sw64_linux_del_one_watch(lwpid, priv, watch_type,addr,len))
++ {
++ warning("none wp about %#lx\n", (long)addr);
++ return -1; /* We don't know about it, fail doing nothing. */
++ }
++ /* Only update the threads of this process. */
++ //find_inferior (&all_threads, update_watch_registers_callback, &pid);
++ for_each_thread (pid, update_watch_registers_callback);
++ return 0;
++}
++
++/* This is the implementation of linux target ops method
++ low_stopped_by_watchpoint. The watchhi R and W bits indicate
++ the watch register triggered. */
++
++bool
++sw64_target::low_stopped_by_watchpoint ()
++{
++ pid_t lwpid = lwpid_of(current_thread);
++ struct lwp_info *lwp = get_thread_lwp (current_thread);
++ siginfo_t siginfo;
++
++ /* Retrieve siginfo. */
++ errno = 0;
++ ptrace (PTRACE_GETSIGINFO, lwpid, 0, &siginfo);
++ if (errno != 0)
++ {
++ warning("%s:%d GETSIGINFO return %d\n", __FILE__, __LINE__, errno);
++ return false;
++ }
++// debug("GETSIGINFO %#x si_code=%#x si_signo=%d si_errno = %x",
++// lwpid, siginfo.si_code, siginfo.si_signo,siginfo.si_errno);
++ /* This must be a hardware breakpoint. */
++ if (siginfo.si_signo != SIGTRAP || (siginfo.si_code & 0xffff) != TRAP_HWBKPT)
++ return false;
++// debug("si_code=%#x si_signo=%d si_errno = %x pc=%#lx, data_address %#lx",
++// siginfo.si_code, siginfo.si_signo, siginfo.si_errno,
++// (long)siginfo.si_value.sival_ptr, (long)siginfo.si_addr);
++ /* siginfo should return the accessed data address, not pc */
++ if (siginfo.si_errno == 1)
++ lwp->arch_private->stopped_data_address
++ = (CORE_ADDR) (uintptr_t) (lwp->arch_private->wpt->match & ((1L<<53)-1));
++ else
++ lwp->arch_private->stopped_data_address
++ = (CORE_ADDR) (uintptr_t) (lwp->arch_private->value_address); // get the saved
++ return true;
++}
++
++/* This is the implementation of linux target ops method
++ low_stopped_data_address. */
++
++CORE_ADDR
++sw64_target::low_stopped_data_address ()
++{
++ struct lwp_info *lwp;
++
++ lwp = get_thread_lwp (current_thread);
++ low_stopped_by_watchpoint();
++
++ return lwp->arch_private->stopped_data_address;
++}
++
++/* Fetch the thread-local storage pointer for libthread_db. */
++
++ps_err_e
++ps_get_thread_area (struct ps_prochandle *ph,
++ lwpid_t lwpid, int idx, void **base)
++{
++ if (ptrace (PTRACE_GET_THREAD_AREA, lwpid, NULL, base) != 0)
++ return PS_ERR;
++
++ /* IDX is the bias from the thread pointer to the beginning of the
++ thread descriptor. It has to be subtracted due to implementation
++ quirks in libthread_db. */
++ *base = (void *) ((char *)*base - idx);
++
++ return PS_OK;
++}
++
++#ifdef HAVE_PTRACE_GETREGS
++
++static void
++sw64_collect_register (struct regcache *regcache,
++ int use_64bit, int regno, union sw64_register *reg)
++{
++ union sw64_register tmp_reg;
++
++ collect_register (regcache, regno, &tmp_reg.reg64);
++ *reg = tmp_reg;
++}
++
++static void
++sw64_supply_register (struct regcache *regcache,
++ int use_64bit, int regno, const union sw64_register *reg)
++{
++ int offset = 0;
++
++ supply_register (regcache, regno, reg->buf + offset);
++}
++
++#ifdef HAVE_PTRACE_GETREGS
++
++static void
++sw64_fill_gregset (struct regcache *regcache, void *buf)
++{
++ union sw64_register *regset = (union sw64_register *) buf;
++ int i, use_64bit;
++ const struct target_desc *tdesc = regcache->tdesc;
++
++ use_64bit = (register_size (tdesc, 0) == 8);
++
++ for (i = 1; i < 32; i++)
++ sw64_collect_register (regcache, use_64bit, i, regset + i);
++}
++
++static void
++sw64_store_gregset (struct regcache *regcache, const void *buf)
++{
++ const union sw64_register *regset = (const union sw64_register *) buf;
++ int i, use_64bit;
++
++ use_64bit = (register_size (regcache->tdesc, 0) == 8);
++
++ supply_register_by_name_zeroed (regcache, "r31");
++
++ for (i = 1; i < 32; i++)
++ sw64_supply_register (regcache, use_64bit, i, regset + i);
++}
++
++static void
++sw64_fill_fpregset (struct regcache *regcache, void *buf)
++{
++ union sw64_register *regset = (union sw64_register *) buf;
++ int i, use_64bit, first_fp, big_endian;
++
++ use_64bit = (register_size (regcache->tdesc, 0) == 8);
++ first_fp = find_regno (regcache->tdesc, "f0");
++ big_endian = (__BYTE_ORDER == __BIG_ENDIAN);
++
++ /* See GDB for a discussion of this peculiar layout. */
++ for (i = 0; i < 32; i++)
++ if (use_64bit)
++ collect_register (regcache, first_fp + i, regset[i].buf);
++ else
++ collect_register (regcache, first_fp + i,
++ regset[i & ~1].buf + 4 * (big_endian != (i & 1)));
++}
++
++static void
++sw64_store_fpregset (struct regcache *regcache, const void *buf)
++{
++ const union sw64_register *regset = (const union sw64_register *) buf;
++ int i, use_64bit, first_fp, big_endian;
++
++ use_64bit = (register_size (regcache->tdesc, 0) == 8);
++ first_fp = find_regno (regcache->tdesc, "f0");
++ big_endian = (__BYTE_ORDER == __BIG_ENDIAN);
++
++ /* See GDB for a discussion of this peculiar layout. */
++ for (i = 0; i < 32; i++)
++ if (use_64bit)
++ supply_register (regcache, first_fp + i, regset[i].buf);
++ else
++ supply_register (regcache, first_fp + i,
++ regset[i & ~1].buf + 4 * (big_endian != (i & 1)));
++}
++#endif /* HAVE_PTRACE_GETREGS */
++
++#if 0
++/* Take care of 32-bit registers with 64-bit ptrace, POKEUSER side. */
++
++void
++sw64_target::low_collect_ptrace_register (regcache *regcache, int regno,
++ char *buf)
++{
++ int use_64bit = sizeof (PTRACE_XFER_TYPE) == 8;
++
++ if (use_64bit && register_size (regcache->tdesc, regno) == 4)
++ {
++ union sw64_register reg;
++
++ sw64_collect_register (regcache, 0, regno, ®);
++ memcpy (buf, ®, sizeof (reg));
++ }
++ else
++ collect_register (regcache, regno, buf);
++}
++
++/* Take care of 32-bit registers with 64-bit ptrace, PEEKUSER side. */
++
++void
++sw64_target::low_supply_ptrace_register (regcache *regcache, int regno,
++ const char *buf)
++{
++ int use_64bit = sizeof (PTRACE_XFER_TYPE) == 8;
++
++ if (use_64bit && register_size (regcache->tdesc, regno) == 4)
++ {
++ union sw64_register reg;
++
++ memcpy (®, buf, sizeof (reg));
++ sw64_supply_register (regcache, 0, regno, ®);
++ }
++ else
++ supply_register (regcache, regno, buf);
++}
++#endif
++
++static struct regset_info sw64_regsets[] = {
++#ifdef HAVE_PTRACE_GETREGS
++ { PTRACE_GETREGS, PTRACE_SETREGS, 0, 33 * 8, GENERAL_REGS,
++ sw64_fill_gregset, sw64_store_gregset },
++ { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, 32 * 8, FP_REGS,
++ sw64_fill_fpregset, sw64_store_fpregset },
++#endif /* HAVE_PTRACE_GETREGS */
++ NULL_REGSET
++};
++
++static struct regsets_info sw64_regsets_info =
++{
++ sw64_regsets, /* regsets */
++ 0, /* num_regsets */
++ NULL, /* disabled_regsets */
++};
++
++#endif /* HAVE_PTRACE_GETREGS */
++
++static struct usrregs_info sw64_usrregs_info =
++{
++ sw64_num_regs,
++ sw64_regmap,
++};
++
++static struct regs_info myregs_info =
++{
++ NULL, /* regset_bitmap */
++ &sw64_usrregs_info,
++#ifdef HAVE_PTRACE_GETREGS
++ &sw64_regsets_info
++#endif
++};
++
++const regs_info *
++sw64_target::get_regs_info ()
++{
++ return &myregs_info;
++}
++
++#define BTSIZE 20
++int gcc_backtrace()
++{
++ int j, nptrs;
++ void *buffer[BTSIZE];
++ char **strings;
++
++ nptrs = backtrace(buffer, BTSIZE);
++
++ /* The call backtrace_symbols_fd(buffer, nptrs, STDOUT_FILENO)
++ would produce similar output to the following: */
++
++ strings = backtrace_symbols(buffer, nptrs);
++ if (strings == NULL) {
++ perror("backtrace_symbols");
++ return (EXIT_FAILURE);
++ }
++
++ for (j = 0; j < nptrs; j++)
++ printf(" %s", strings[j]);
++ printf("\n");
++
++ free(strings);
++ return 0;
++}
++
++
++void exc_handler(int sig, siginfo_t* info, void *arg)
++{
++ //unsigned long ra;
++ //unsigned long sp, pc;
++ //ucontext_t *uc = (ucontext_t *)arg;
++#if 0
++ register unsigned long __ra __asm__("$26");
++ pc = uc->uc_mcontext.sc_pc;
++ ra = uc->uc_mcontext.sc_regs[26];
++ sp = uc->uc_mcontext.sc_regs[30];
++ printf("pc = %#lx, ra = %#lx, sp=%#lx, called from %#lx\n", pc, ra, sp, *(unsigned long*)sp); fflush(stdout);
++#else
++ gcc_backtrace();
++#endif
++ exit(1);
++}
++
++/* The linux target ops object. */
++
++linux_process_target *the_linux_target = &the_sw64_target;
++
++void
++initialize_low_arch (void)
++{
++ struct sigaction sa, old_sa;
++
++ /* Initialize the Linux target descriptions. */
++ init_registers_sw64_linux ();
++
++ //initialize_regsets_info (&sw64_regsets_info);
++
++ sa.sa_sigaction = exc_handler;
++ sigemptyset (&sa.sa_mask);
++ sa.sa_flags = SA_RESTART|SA_SIGINFO;
++ sigaction (11, &sa, &old_sa);
++ sigaction (4, &sa, &old_sa);
++ sigaction (8, &sa, &old_sa);
++}
+diff -Naur gdb-14.1-after-patch/gdbserver/Makefile.in gdb-14.1-sw64/gdbserver/Makefile.in
+--- gdb-14.1-after-patch/gdbserver/Makefile.in 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/gdbserver/Makefile.in 2025-03-03 10:59:13.690000000 +0800
+@@ -173,6 +173,7 @@
+ $(srcdir)/i387-fp.cc \
+ $(srcdir)/inferiors.cc \
+ $(srcdir)/linux-aarch64-low.cc \
++ $(srcdir)/linux-sw64-low.cc \
+ $(srcdir)/linux-arc-low.cc \
+ $(srcdir)/linux-arm-low.cc \
+ $(srcdir)/linux-csky-low.cc \
+@@ -224,6 +225,7 @@
+ $(srcdir)/../gdb/nat/linux-osdata.c \
+ $(srcdir)/../gdb/nat/linux-personality.c \
+ $(srcdir)/../gdb/nat/mips-linux-watch.c \
++ $(srcdir)/../gdb/nat/sw64-linux-watch.c \
+ $(srcdir)/../gdb/nat/ppc-linux.c \
+ $(srcdir)/../gdb/nat/riscv-linux-tdesc.c \
+ $(srcdir)/../gdb/nat/fork-inferior.c \
+diff -Naur gdb-14.1-after-patch/gdbserver/mem-break.cc gdb-14.1-sw64/gdbserver/mem-break.cc
+--- gdb-14.1-after-patch/gdbserver/mem-break.cc 2023-12-03 13:23:54.000000000 +0800
++++ gdb-14.1-sw64/gdbserver/mem-break.cc 2025-03-03 10:59:13.690000000 +0800
+@@ -70,7 +70,7 @@
+ software breakpoints, a buffer holding a copy of the instructions
+ that would be in memory had not been a breakpoint there (we call
+ that the shadow memory of the breakpoint). We occasionally need to
+- temporarily uninsert a breakpoint without the client knowing about
++ temporarily uninsert a breakpoint without the client knowing about
+ it (e.g., to step over an internal breakpoint), so we keep an
+ `inserted' state associated with this low level breakpoint
+ structure. There can only be one such object for a given address.
+@@ -252,6 +252,10 @@
+ return hw_read;
+ case raw_bkpt_type_access_wp:
+ return hw_access;
++#ifndef LHX20240711_gdbserver
++ case raw_bkpt_type_value_wp:
++ return hw_vstore;
++#endif
+ default:
+ internal_error ("bad raw breakpoint type %d", (int) raw_type);
+ }
+@@ -262,7 +266,11 @@
+ static enum bkpt_type
+ Z_packet_to_bkpt_type (char z_type)
+ {
++#ifndef LHX20240711_gdbserver
++ gdb_assert (z_type >= '0' && z_type <= Z_PACKET_DV_WP);
++#else
+ gdb_assert ('0' <= z_type && z_type <= '4');
++#endif
+
+ return (enum bkpt_type) (gdb_breakpoint_Z0 + (z_type - '0'));
+ }
+@@ -284,6 +292,10 @@
+ return raw_bkpt_type_read_wp;
+ case Z_PACKET_ACCESS_WP:
+ return raw_bkpt_type_access_wp;
++#ifndef LHX20240711_gdbserver
++ case Z_PACKET_DV_WP:
++ return raw_bkpt_type_value_wp;
++#endif
+ default:
+ gdb_assert_not_reached ("unhandled Z packet type.");
+ }
+diff -Naur gdb-14.1-after-patch/gdbserver/mem-break.h gdb-14.1-sw64/gdbserver/mem-break.h
+--- gdb-14.1-after-patch/gdbserver/mem-break.h 2023-02-02 12:45:52.000000000 +0800
++++ gdb-14.1-sw64/gdbserver/mem-break.h 2025-03-03 10:59:13.690000000 +0800
+@@ -35,6 +35,9 @@
+ #define Z_PACKET_WRITE_WP '2'
+ #define Z_PACKET_READ_WP '3'
+ #define Z_PACKET_ACCESS_WP '4'
++#ifndef LHX20240711_gdbserver
++#define Z_PACKET_DV_WP '5'
++#endif
+
+ /* The low level breakpoint types. */
+
+@@ -54,6 +57,9 @@
+
+ /* Hardware-assisted access watchpoint. */
+ raw_bkpt_type_access_wp
++#ifndef LHX20240711_gdbserver
++ , raw_bkpt_type_value_wp
++#endif
+ };
+
+ /* Map the protocol breakpoint/watchpoint type Z_TYPE to the internal
+diff -Naur gdb-14.1-after-patch/gdbserver/regcache.cc gdb-14.1-sw64/gdbserver/regcache.cc
+--- gdb-14.1-after-patch/gdbserver/regcache.cc 2023-12-03 13:23:54.000000000 +0800
++++ gdb-14.1-sw64/gdbserver/regcache.cc 2025-03-03 10:59:13.690000000 +0800
+@@ -200,7 +200,7 @@
+ find_register_by_number (const struct target_desc *tdesc, int n)
+ {
+ gdb_assert (n >= 0);
+- gdb_assert (n < tdesc->reg_defs.size ());
++// gdb_assert (n < tdesc->reg_defs.size ());
+
+ return tdesc->reg_defs[n];
+ }
+diff -Naur gdb-14.1-after-patch/gdbsupport/break-common.h gdb-14.1-sw64/gdbsupport/break-common.h
+--- gdb-14.1-after-patch/gdbsupport/break-common.h 2023-02-02 12:45:52.000000000 +0800
++++ gdb-14.1-sw64/gdbsupport/break-common.h 2025-03-03 10:59:13.700000000 +0800
+@@ -26,6 +26,9 @@
+ hw_read = 1, /* Read HW watchpoint */
+ hw_access = 2, /* Access HW watchpoint */
+ hw_execute = 3 /* Execute HW breakpoint */
++#ifndef LHX20240711
++ , hw_vstore = 4
++#endif
+ };
+
+ #endif /* COMMON_BREAK_COMMON_H */
+diff -Naur gdb-14.1-after-patch/include/coff/ecoff.h gdb-14.1-sw64/include/coff/ecoff.h
+--- gdb-14.1-after-patch/include/coff/ecoff.h 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/include/coff/ecoff.h 2025-03-03 10:59:13.750000000 +0800
+@@ -41,6 +41,9 @@
+ #define MIPS_MAGIC_LITTLE3 0x142
+ #define MIPS_MAGIC_BIG3 0x140
+
++/* SW64 magic numbers used in filehdr. */
++#define SW64_MAGIC 0x9916
++
+ /* Alpha magic numbers used in filehdr. */
+ #define ALPHA_MAGIC 0x183
+ #define ALPHA_MAGIC_BSD 0x185
+diff -Naur gdb-14.1-after-patch/include/coff/sw64.h gdb-14.1-sw64/include/coff/sw64.h
+--- gdb-14.1-after-patch/include/coff/sw64.h 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/include/coff/sw64.h 2025-03-03 10:59:13.750000000 +0800
+@@ -0,0 +1,386 @@
++/* ECOFF support on SW64 machines.
++ coff/ecoff.h must be included before this file.
++
++ Copyright (C) 2001-2023 Free Software Foundation, Inc.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
++ MA 02110-1301, USA. */
++
++/********************** FILE HEADER **********************/
++
++struct external_filehdr
++{
++ unsigned char f_magic[2]; /* magic number */
++ unsigned char f_nscns[2]; /* number of sections */
++ unsigned char f_timdat[4]; /* time & date stamp */
++ unsigned char f_symptr[8]; /* file pointer to symtab */
++ unsigned char f_nsyms[4]; /* number of symtab entries */
++ unsigned char f_opthdr[2]; /* sizeof(optional hdr) */
++ unsigned char f_flags[2]; /* flags */
++};
++
++/* Magic numbers are defined in coff/ecoff.h. */
++#define SW64_ECOFF_BADMAG(x) \
++ ((x).f_magic != SW64_MAGIC)
++
++#define SW64_ECOFF_COMPRESSEDMAG(x) \
++ ((x).f_magic == SW64_MAGIC)
++
++/* The object type is encoded in the f_flags. */
++#define F_SW64_OBJECT_TYPE_MASK 0x3000
++#define F_SW64_NO_SHARED 0x1000
++#define F_SW64_SHARABLE 0x2000
++#define F_SW64_CALL_SHARED 0x3000
++
++#define FILHDR struct external_filehdr
++#define FILHSZ 24
++
++/********************** AOUT "OPTIONAL HEADER" **********************/
++
++typedef struct external_aouthdr
++{
++ unsigned char magic[2]; /* type of file */
++ unsigned char vstamp[2]; /* version stamp */
++ unsigned char bldrev[2]; /* ?? */
++ unsigned char padding[2]; /* pad to quadword boundary */
++ unsigned char tsize[8]; /* text size in bytes */
++ unsigned char dsize[8]; /* initialized data " " */
++ unsigned char bsize[8]; /* uninitialized data " " */
++ unsigned char entry[8]; /* entry pt. */
++ unsigned char text_start[8]; /* base of text used for this file */
++ unsigned char data_start[8]; /* base of data used for this file */
++ unsigned char bss_start[8]; /* base of bss used for this file */
++ unsigned char gprmask[4]; /* bitmask of general registers used */
++ unsigned char fprmask[4]; /* bitmask of floating point registers used */
++ unsigned char gp_value[8]; /* value for gp register */
++} AOUTHDR;
++
++/* compute size of a header */
++
++#define AOUTSZ 80
++#define AOUTHDRSZ 80
++
++/********************** SECTION HEADER **********************/
++
++struct external_scnhdr
++{
++ unsigned char s_name[8]; /* section name */
++ unsigned char s_paddr[8]; /* physical address, aliased s_nlib */
++ unsigned char s_vaddr[8]; /* virtual address */
++ unsigned char s_size[8]; /* section size */
++ unsigned char s_scnptr[8]; /* file ptr to raw data for section */
++ unsigned char s_relptr[8]; /* file ptr to relocation */
++ unsigned char s_lnnoptr[8]; /* file ptr to line numbers */
++ unsigned char s_nreloc[2]; /* number of relocation entries */
++ unsigned char s_nlnno[2]; /* number of line number entries*/
++ unsigned char s_flags[4]; /* flags */
++};
++
++#define SCNHDR struct external_scnhdr
++#define SCNHSZ 64
++
++/********************** RELOCATION DIRECTIVES **********************/
++
++struct external_reloc
++{
++ unsigned char r_vaddr[8];
++ unsigned char r_symndx[4];
++ unsigned char r_bits[4];
++};
++
++#define RELOC struct external_reloc
++#define RELSZ 16
++
++/* Constants to unpack the r_bits field. The SW64 seems to always be
++ little endian, so I haven't bothered to define big endian variants
++ of these. */
++
++#define RELOC_BITS0_TYPE_LITTLE 0xff
++#define RELOC_BITS0_TYPE_SH_LITTLE 0
++
++#define RELOC_BITS1_EXTERN_LITTLE 0x01
++
++#define RELOC_BITS1_OFFSET_LITTLE 0x7e
++#define RELOC_BITS1_OFFSET_SH_LITTLE 1
++
++#define RELOC_BITS1_RESERVED_LITTLE 0x80
++#define RELOC_BITS1_RESERVED_SH_LITTLE 7
++#define RELOC_BITS2_RESERVED_LITTLE 0xff
++#define RELOC_BITS2_RESERVED_SH_LEFT_LITTLE 1
++#define RELOC_BITS3_RESERVED_LITTLE 0x03
++#define RELOC_BITS3_RESERVED_SH_LEFT_LITTLE 9
++
++#define RELOC_BITS3_SIZE_LITTLE 0xfc
++#define RELOC_BITS3_SIZE_SH_LITTLE 2
++
++/* The r_type field in a reloc is one of the following values. */
++#define SW64_R_IGNORE 0
++#define SW64_R_REFLONG 1
++#define SW64_R_REFQUAD 2
++#define SW64_R_GPREL32 3
++#define SW64_R_LITERAL 4
++#define SW64_R_LITUSE 5
++#define SW64_R_GPDISP 6
++#define SW64_R_BRADDR 7
++#define SW64_R_HINT 8
++#define SW64_R_SREL16 9
++#define SW64_R_SREL32 10
++#define SW64_R_SREL64 11
++#define SW64_R_OP_PUSH 12
++#define SW64_R_OP_STORE 13
++#define SW64_R_OP_PSUB 14
++#define SW64_R_OP_PRSHIFT 15
++#define SW64_R_GPVALUE 16
++#define SW64_R_GPRELHIGH 17
++#define SW64_R_GPRELLOW 18
++#define SW64_R_IMMED 19
++
++/* Overloaded reloc value used by Net- and OpenBSD. */
++#define SW64_R_LITERALSLEAZY 17
++
++/* With SW64_R_LITUSE, the r_size field is one of the following values. */
++#define SW64_R_LU_BASE 1
++#define SW64_R_LU_BYTOFF 2
++#define SW64_R_LU_JSR 3
++
++/* With SW64_R_IMMED, the r_size field is one of the following values. */
++#define SW64_R_IMMED_GP_16 1
++#define SW64_R_IMMED_GP_HI32 2
++#define SW64_R_IMMED_SCN_HI32 3
++#define SW64_R_IMMED_BR_HI32 4
++#define SW64_R_IMMED_LO32 5
++
++/********************** SYMBOLIC INFORMATION **********************/
++
++/* Written by John Gilmore. */
++
++/* ECOFF uses COFF-like section structures, but its own symbol format.
++ This file defines the symbol format in fields whose size and alignment
++ will not vary on different host systems. */
++
++/* File header as a set of bytes */
++
++struct hdr_ext
++{
++ unsigned char h_magic[2];
++ unsigned char h_vstamp[2];
++ unsigned char h_ilineMax[4];
++ unsigned char h_idnMax[4];
++ unsigned char h_ipdMax[4];
++ unsigned char h_isymMax[4];
++ unsigned char h_ioptMax[4];
++ unsigned char h_iauxMax[4];
++ unsigned char h_issMax[4];
++ unsigned char h_issExtMax[4];
++ unsigned char h_ifdMax[4];
++ unsigned char h_crfd[4];
++ unsigned char h_iextMax[4];
++ unsigned char h_cbLine[8];
++ unsigned char h_cbLineOffset[8];
++ unsigned char h_cbDnOffset[8];
++ unsigned char h_cbPdOffset[8];
++ unsigned char h_cbSymOffset[8];
++ unsigned char h_cbOptOffset[8];
++ unsigned char h_cbAuxOffset[8];
++ unsigned char h_cbSsOffset[8];
++ unsigned char h_cbSsExtOffset[8];
++ unsigned char h_cbFdOffset[8];
++ unsigned char h_cbRfdOffset[8];
++ unsigned char h_cbExtOffset[8];
++};
++
++/* File descriptor external record */
++
++struct fdr_ext
++{
++ unsigned char f_adr[8];
++ unsigned char f_cbLineOffset[8];
++ unsigned char f_cbLine[8];
++ unsigned char f_cbSs[8];
++ unsigned char f_rss[4];
++ unsigned char f_issBase[4];
++ unsigned char f_isymBase[4];
++ unsigned char f_csym[4];
++ unsigned char f_ilineBase[4];
++ unsigned char f_cline[4];
++ unsigned char f_ioptBase[4];
++ unsigned char f_copt[4];
++ unsigned char f_ipdFirst[4];
++ unsigned char f_cpd[4];
++ unsigned char f_iauxBase[4];
++ unsigned char f_caux[4];
++ unsigned char f_rfdBase[4];
++ unsigned char f_crfd[4];
++ unsigned char f_bits1[1];
++ unsigned char f_bits2[3];
++ unsigned char f_padding[4];
++};
++
++#define FDR_BITS1_LANG_BIG 0xF8
++#define FDR_BITS1_LANG_SH_BIG 3
++#define FDR_BITS1_LANG_LITTLE 0x1F
++#define FDR_BITS1_LANG_SH_LITTLE 0
++
++#define FDR_BITS1_FMERGE_BIG 0x04
++#define FDR_BITS1_FMERGE_LITTLE 0x20
++
++#define FDR_BITS1_FREADIN_BIG 0x02
++#define FDR_BITS1_FREADIN_LITTLE 0x40
++
++#define FDR_BITS1_FBIGENDIAN_BIG 0x01
++#define FDR_BITS1_FBIGENDIAN_LITTLE 0x80
++
++#define FDR_BITS2_GLEVEL_BIG 0xC0
++#define FDR_BITS2_GLEVEL_SH_BIG 6
++#define FDR_BITS2_GLEVEL_LITTLE 0x03
++#define FDR_BITS2_GLEVEL_SH_LITTLE 0
++
++/* We ignore the `reserved' field in bits2. */
++
++/* Procedure descriptor external record */
++
++struct pdr_ext {
++ unsigned char p_adr[8];
++ unsigned char p_cbLineOffset[8];
++ unsigned char p_isym[4];
++ unsigned char p_iline[4];
++ unsigned char p_regmask[4];
++ unsigned char p_regoffset[4];
++ unsigned char p_iopt[4];
++ unsigned char p_fregmask[4];
++ unsigned char p_fregoffset[4];
++ unsigned char p_frameoffset[4];
++ unsigned char p_lnLow[4];
++ unsigned char p_lnHigh[4];
++ unsigned char p_gp_prologue[1];
++ unsigned char p_bits1[1];
++ unsigned char p_bits2[1];
++ unsigned char p_localoff[1];
++ unsigned char p_framereg[2];
++ unsigned char p_pcreg[2];
++};
++
++#define PDR_BITS1_GP_USED_BIG 0x80
++#define PDR_BITS1_REG_FRAME_BIG 0x40
++#define PDR_BITS1_PROF_BIG 0x20
++#define PDR_BITS1_RESERVED_BIG 0x1f
++#define PDR_BITS1_RESERVED_SH_LEFT_BIG 8
++#define PDR_BITS2_RESERVED_BIG 0xff
++#define PDR_BITS2_RESERVED_SH_BIG 0
++
++#define PDR_BITS1_GP_USED_LITTLE 0x01
++#define PDR_BITS1_REG_FRAME_LITTLE 0x02
++#define PDR_BITS1_PROF_LITTLE 0x04
++#define PDR_BITS1_RESERVED_LITTLE 0xf8
++#define PDR_BITS1_RESERVED_SH_LITTLE 3
++#define PDR_BITS2_RESERVED_LITTLE 0xff
++#define PDR_BITS2_RESERVED_SH_LEFT_LITTLE 5
++
++/* Line numbers */
++
++struct line_ext {
++ unsigned char l_line[4];
++};
++
++/* Symbol external record */
++
++struct sym_ext {
++ unsigned char s_value[8];
++ unsigned char s_iss[4];
++ unsigned char s_bits1[1];
++ unsigned char s_bits2[1];
++ unsigned char s_bits3[1];
++ unsigned char s_bits4[1];
++};
++
++#define SYM_BITS1_ST_BIG 0xFC
++#define SYM_BITS1_ST_SH_BIG 2
++#define SYM_BITS1_ST_LITTLE 0x3F
++#define SYM_BITS1_ST_SH_LITTLE 0
++
++#define SYM_BITS1_SC_BIG 0x03
++#define SYM_BITS1_SC_SH_LEFT_BIG 3
++#define SYM_BITS1_SC_LITTLE 0xC0
++#define SYM_BITS1_SC_SH_LITTLE 6
++
++#define SYM_BITS2_SC_BIG 0xE0
++#define SYM_BITS2_SC_SH_BIG 5
++#define SYM_BITS2_SC_LITTLE 0x07
++#define SYM_BITS2_SC_SH_LEFT_LITTLE 2
++
++#define SYM_BITS2_RESERVED_BIG 0x10
++#define SYM_BITS2_RESERVED_LITTLE 0x08
++
++#define SYM_BITS2_INDEX_BIG 0x0F
++#define SYM_BITS2_INDEX_SH_LEFT_BIG 16
++#define SYM_BITS2_INDEX_LITTLE 0xF0
++#define SYM_BITS2_INDEX_SH_LITTLE 4
++
++#define SYM_BITS3_INDEX_SH_LEFT_BIG 8
++#define SYM_BITS3_INDEX_SH_LEFT_LITTLE 4
++
++#define SYM_BITS4_INDEX_SH_LEFT_BIG 0
++#define SYM_BITS4_INDEX_SH_LEFT_LITTLE 12
++
++/* External symbol external record */
++
++struct ext_ext {
++ struct sym_ext es_asym;
++ unsigned char es_bits1[1];
++ unsigned char es_bits2[3];
++ unsigned char es_ifd[4];
++};
++
++#define EXT_BITS1_JMPTBL_BIG 0x80
++#define EXT_BITS1_JMPTBL_LITTLE 0x01
++
++#define EXT_BITS1_COBOL_MAIN_BIG 0x40
++#define EXT_BITS1_COBOL_MAIN_LITTLE 0x02
++
++#define EXT_BITS1_WEAKEXT_BIG 0x20
++#define EXT_BITS1_WEAKEXT_LITTLE 0x04
++
++/* Dense numbers external record */
++
++struct dnr_ext {
++ unsigned char d_rfd[4];
++ unsigned char d_index[4];
++};
++
++/* Relative file descriptor */
++
++struct rfd_ext {
++ unsigned char rfd[4];
++};
++
++/* Optimizer symbol external record */
++
++struct opt_ext {
++ unsigned char o_bits1[1];
++ unsigned char o_bits2[1];
++ unsigned char o_bits3[1];
++ unsigned char o_bits4[1];
++ struct rndx_ext o_rndx;
++ unsigned char o_offset[4];
++};
++
++#define OPT_BITS2_VALUE_SH_LEFT_BIG 16
++#define OPT_BITS2_VALUE_SH_LEFT_LITTLE 0
++
++#define OPT_BITS3_VALUE_SH_LEFT_BIG 8
++#define OPT_BITS3_VALUE_SH_LEFT_LITTLE 8
++
++#define OPT_BITS4_VALUE_SH_LEFT_BIG 0
++#define OPT_BITS4_VALUE_SH_LEFT_LITTLE 16
+diff -Naur gdb-14.1-after-patch/include/dis-asm.h gdb-14.1-sw64/include/dis-asm.h
+--- gdb-14.1-after-patch/include/dis-asm.h 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/include/dis-asm.h 2025-03-03 10:59:13.750000000 +0800
+@@ -378,6 +378,7 @@
+ extern disassembler_ftype cris_get_disassembler (bfd *);
+
+ extern void print_aarch64_disassembler_options (FILE *);
++extern void print_sw64_disassembler_options (FILE *);
+ extern void print_i386_disassembler_options (FILE *);
+ extern void print_mips_disassembler_options (FILE *);
+ extern void print_nfp_disassembler_options (FILE *);
+diff -Naur gdb-14.1-after-patch/include/elf/common.h gdb-14.1-sw64/include/elf/common.h
+--- gdb-14.1-after-patch/include/elf/common.h 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/include/elf/common.h 2025-03-03 10:59:13.750000000 +0800
+@@ -415,6 +415,9 @@
+ /* Alpha backend magic number. Written in the absence of an ABI. */
+ #define EM_ALPHA 0x9026
+
++/* SW64 backend magic number. Written in the absence of an ABI. */
++#define EM_SW64 0x9916
++
+ /* Cygnus M32R ELF backend. Written in the absence of an ABI. */
+ #define EM_CYGNUS_M32R 0x9041
+
+diff -Naur gdb-14.1-after-patch/include/elf/sw64.h gdb-14.1-sw64/include/elf/sw64.h
+--- gdb-14.1-after-patch/include/elf/sw64.h 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/include/elf/sw64.h 2025-03-03 10:59:13.760000000 +0800
+@@ -0,0 +1,121 @@
++/* SW64 ELF support for BFD.
++ Copyright (C) 1996-2023 Free Software Foundation, Inc.
++
++ By Eric Youngdale, <eric@aib.com>. No processor supplement available
++ for this platform.
++
++ This file is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
++ MA 02110-1301, USA. */
++
++/* This file holds definitions specific to the SW64 ELF ABI. Note
++ that most of this is not actually implemented by BFD. */
++
++#ifndef _ELF_SW64_H
++#define _ELF_SW64_H
++
++/* Processor specific flags for the ELF header e_flags field. */
++
++/* All addresses must be below 2GB. */
++#define EF_SW64_32BIT 0x00000001
++
++/* All relocations needed for relaxation with code movement are present. */
++#define EF_SW64_CANRELAX 0x00000002
++
++/* Processor specific section flags. */
++
++/* This section must be in the global data area. */
++#define SHF_SW64_GPREL 0x10000000
++
++/* Section contains some sort of debugging information. The exact
++ format is unspecified. It's probably ECOFF symbols. */
++#define SHT_SW64_DEBUG 0x70000001
++
++/* Section contains register usage information. */
++#define SHT_SW64_REGINFO 0x70000002
++
++/* Special values for the st_other field in the symbol table. */
++
++#define STO_SW64_NOPV 0x80
++#define STO_SW64_STD_GPLOAD 0x88
++
++/* Special values for Elf64_Dyn tag. */
++#define DT_SW64_PLTRO DT_LOPROC
++
++#include "elf/reloc-macros.h"
++
++/* SW64 relocs. */
++START_RELOC_NUMBERS (elf_sw64_reloc_type)
++ RELOC_NUMBER (R_SW64_NONE, 0) /* No reloc */
++ RELOC_NUMBER (R_SW64_REFLONG, 1) /* Direct 32 bit */
++ RELOC_NUMBER (R_SW64_REFQUAD, 2) /* Direct 64 bit */
++ RELOC_NUMBER (R_SW64_GPREL32, 3) /* GP relative 32 bit */
++ RELOC_NUMBER (R_SW64_LITERAL, 4) /* GP relative 16 bit w/optimization */
++ RELOC_NUMBER (R_SW64_LITUSE, 5) /* Optimization hint for LITERAL */
++ RELOC_NUMBER (R_SW64_GPDISP, 6) /* Add displacement to GP */
++ RELOC_NUMBER (R_SW64_BRADDR, 7) /* PC+4 relative 23 bit shifted */
++ RELOC_NUMBER (R_SW64_HINT, 8) /* PC+4 relative 16 bit shifted */
++ RELOC_NUMBER (R_SW64_SREL16, 9) /* PC relative 16 bit */
++ RELOC_NUMBER (R_SW64_SREL32, 10) /* PC relative 32 bit */
++ RELOC_NUMBER (R_SW64_SREL64, 11) /* PC relative 64 bit */
++
++ /* Skip 12 - 16; deprecated ECOFF relocs. */
++
++ RELOC_NUMBER (R_SW64_GPRELHIGH, 17) /* GP relative 32 bit, high 16 bits */
++ RELOC_NUMBER (R_SW64_GPRELLOW, 18) /* GP relative 32 bit, low 16 bits */
++ RELOC_NUMBER (R_SW64_GPREL16, 19) /* GP relative 16 bit */
++
++ /* Skip 20 - 23; deprecated ECOFF relocs. */
++
++ /* These relocations are specific to shared libraries. */
++ RELOC_NUMBER (R_SW64_COPY, 24) /* Copy symbol at runtime */
++ RELOC_NUMBER (R_SW64_GLOB_DAT, 25) /* Create GOT entry */
++ RELOC_NUMBER (R_SW64_JMP_SLOT, 26) /* Create PLT entry */
++ RELOC_NUMBER (R_SW64_RELATIVE, 27) /* Adjust by program base */
++
++ /* Like BRADDR, but assert that the source and target object file
++ share the same GP value, and adjust the target address for
++ STO_SW64_STD_GPLOAD. */
++ RELOC_NUMBER (R_SW64_BRSGP, 28)
++
++ /* Thread-Local Storage. */
++ RELOC_NUMBER (R_SW64_TLSGD, 29)
++ RELOC_NUMBER (R_SW64_TLSLDM, 30)
++ RELOC_NUMBER (R_SW64_DTPMOD64, 31)
++ RELOC_NUMBER (R_SW64_GOTDTPREL, 32)
++ RELOC_NUMBER (R_SW64_DTPREL64, 33)
++ RELOC_NUMBER (R_SW64_DTPRELHI, 34)
++ RELOC_NUMBER (R_SW64_DTPRELLO, 35)
++ RELOC_NUMBER (R_SW64_DTPREL16, 36)
++ RELOC_NUMBER (R_SW64_GOTTPREL, 37)
++ RELOC_NUMBER (R_SW64_TPREL64, 38)
++ RELOC_NUMBER (R_SW64_TPRELHI, 39)
++ RELOC_NUMBER (R_SW64_TPRELLO, 40)
++ RELOC_NUMBER (R_SW64_TPREL16, 41)
++ RELOC_NUMBER (R_SW64_BR26ADDR, 42)
++ RELOC_NUMBER (R_SW64_LITERAL_GOT, 43) /* GP relative 16 bit */
++
++END_RELOC_NUMBERS (R_SW64_max)
++
++#define LITUSE_SW64_ADDR 0
++#define LITUSE_SW64_BASE 1
++#define LITUSE_SW64_BYTOFF 2
++#define LITUSE_SW64_JSR 3
++#define LITUSE_SW64_TLSGD 4
++#define LITUSE_SW64_TLSLDM 5
++#define LITUSE_SW64_JSRDIRECT 6
++
++#endif /* _ELF_SW64_H */
+diff -Naur gdb-14.1-after-patch/include/longlong.h gdb-14.1-sw64/include/longlong.h
+--- gdb-14.1-after-patch/include/longlong.h 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/include/longlong.h 2025-03-03 10:59:13.760000000 +0800
+@@ -605,6 +605,14 @@
+ # endif
+ #endif
+
++#ifdef __sw_64__
++# if W_TYPE_SIZE == 64
++# define count_leading_zeros(count, x) ((count) = __builtin_clzll (x))
++# define count_trailing_zeros(count, x) ((count) = __builtin_ctzll (x))
++# define COUNT_LEADING_ZEROS_0 64
++# endif
++#endif
++
+ #if defined (__M32R__) && W_TYPE_SIZE == 32
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ /* The cmp clears the condition bit. */ \
+diff -Naur gdb-14.1-after-patch/include/opcode/sw64.h gdb-14.1-sw64/include/opcode/sw64.h
+--- gdb-14.1-after-patch/include/opcode/sw64.h 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/include/opcode/sw64.h 2025-03-03 10:59:13.770000000 +0800
+@@ -0,0 +1,252 @@
++/* sw64.h -- Header file for SW64 opcode table
++ Copyright (C) 1996-2023 Free Software Foundation, Inc.
++ Contributed by Richard Henderson <rth@tamu.edu>,
++ patterned after the PPC opcode table written by Ian Lance Taylor.
++
++ This file is part of GDB, GAS, and the GNU binutils.
++
++ GDB, GAS, and the GNU binutils are free software; you can redistribute
++ them and/or modify them under the terms of the GNU General Public
++ License as published by the Free Software Foundation; either version 3,
++ or (at your option) any later version.
++
++ GDB, GAS, and the GNU binutils are distributed in the hope that they
++ will be useful, but WITHOUT ANY WARRANTY; without even the implied
++ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
++ the GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this file; see the file COPYING3. If not, write to the Free
++ Software Foundation, 51 Franklin Street - Fifth Floor, Boston,
++ MA 02110-1301, USA. */
++
++#ifndef OPCODE_SW64_H
++#define OPCODE_SW64_H
++
++/* The opcode table is an array of struct sw64_opcode. */
++
++struct sw64_opcode
++{
++ /* The opcode name. */
++ const char *name;
++
++ /* The opcode itself. Those bits which will be filled in with
++ operands are zeroes. */
++ unsigned opcode;
++
++ /* The opcode mask. This is used by the disassembler. This is a
++ mask containing ones indicating those bits which must match the
++ opcode field, and zeroes indicating those bits which need not
++ match (and are presumably filled in by operands). */
++ unsigned mask;
++
++ /* One bit flags for the opcode. These are primarily used to
++ indicate specific processors and environments support the
++ instructions. The defined values are listed below. */
++ unsigned flags;
++
++ /* An array of operand codes. Each code is an index into the
++ operand table. They appear in the order which the operands must
++ appear in assembly code, and are terminated by a zero. */
++ unsigned char operands[4];
++};
++
++/* The table itself is sorted by major opcode number, and is otherwise
++ in the order in which the disassembler should consider
++ instructions. */
++extern const struct sw64_opcode sw64_opcodes[];
++extern const unsigned sw64_num_opcodes;
++
++/* Values defined for the flags field of a struct sw64_opcode. */
++
++/* CPU Availability */
++#define AXP_OPCODE_BASE 0x0001 /* Base architecture -- all cpus. */
++#define AXP_OPCODE_SW6A 0x0002
++#define AXP_OPCODE_SW6B 0x0004
++#define AXP_OPCODE_SW8A 0x0008
++#define AXP_OPCODE_EV4 0x0002 /* EV4 specific PALcode insns. */
++#define AXP_OPCODE_EV5 0x0004 /* EV5 specific PALcode insns. */
++#define AXP_OPCODE_EV6 0x0008 /* EV6 specific PALcode insns. */
++#define AXP_OPCODE_BWX 0x0100 /* Byte/word extension (amask bit 0). */
++#define AXP_OPCODE_CIX 0x0200 /* "Count" extension (amask bit 1). */
++#define AXP_OPCODE_MAX 0x0400 /* Multimedia extension (amask bit 8). */
++
++///* CPU Availability */
++//#define AXP_OPCODE_BASE 0x0001 /* Base architecture -- all cpus. */
++//#define AXP_OPCODE_SW6 0x0800 /* SW6 insns. */
++//#define AXP_OPCODE_SW6A 0x1000 /* SW6A insns. */
++//#define AXP_OPCODE_SW6B 0x2000 /* SW6B insns. */
++//#define AXP_OPCODE_SW8A 0x4000 /* SW8A insns. */
++
++#define AXP_OPCODE_NOPAL (~(AXP_OPCODE_EV4|AXP_OPCODE_EV5|AXP_OPCODE_EV6|AXP_OPCODE_SW6A|AXP_OPCODE_SW6B|AXP_OPCODE_SW8A))
++
++/* A macro to extract the major opcode from an instruction. */
++#define AXP_OP(i) (((i) >> 26) & 0x3F)
++
++#define AXP_LITOP(i) (((i) >> 26) & 0x3D)
++
++/* The total number of major opcodes. */
++#define AXP_NOPS 0x40
++
++
++/* The operands table is an array of struct sw64_operand. */
++
++struct sw64_operand
++{
++ /* The number of bits in the operand. */
++ unsigned int bits : 5;
++
++ /* How far the operand is left shifted in the instruction. */
++ unsigned int shift : 5;
++
++ /* The default relocation type for this operand. */
++ signed int default_reloc : 16;
++
++ /* One bit syntax flags. */
++ unsigned int flags : 16;
++
++ /* Insertion function. This is used by the assembler. To insert an
++ operand value into an instruction, check this field.
++
++ If it is NULL, execute
++ i |= (op & ((1 << o->bits) - 1)) << o->shift;
++ (i is the instruction which we are filling in, o is a pointer to
++ this structure, and op is the opcode value; this assumes twos
++ complement arithmetic).
++
++ If this field is not NULL, then simply call it with the
++ instruction and the operand value. It will return the new value
++ of the instruction. If the ERRMSG argument is not NULL, then if
++ the operand value is illegal, *ERRMSG will be set to a warning
++ string (the operand will be inserted in any case). If the
++ operand value is legal, *ERRMSG will be unchanged (most operands
++ can accept any value). */
++ unsigned (*insert) (unsigned instruction, int op, const char **errmsg);
++
++ /* Extraction function. This is used by the disassembler. To
++ extract this operand type from an instruction, check this field.
++
++ If it is NULL, compute
++ op = ((i) >> o->shift) & ((1 << o->bits) - 1);
++ if ((o->flags & AXP_OPERAND_SIGNED) != 0
++ && (op & (1 << (o->bits - 1))) != 0)
++ op -= 1 << o->bits;
++ (i is the instruction, o is a pointer to this structure, and op
++ is the result; this assumes twos complement arithmetic).
++
++ If this field is not NULL, then simply call it with the
++ instruction value. It will return the value of the operand. If
++ the INVALID argument is not NULL, *INVALID will be set to
++ non-zero if this operand type can not actually be extracted from
++ this operand (i.e., the instruction does not match). If the
++ operand is valid, *INVALID will not be changed. */
++ int (*extract) (unsigned instruction, int *invalid);
++};
++
++/* Elements in the table are retrieved by indexing with values from
++ the operands field of the sw64_opcodes table. */
++
++extern const struct sw64_operand sw64_operands[];
++extern const unsigned sw64_num_operands;
++
++/* Values defined for the flags field of a struct sw64_operand. */
++
++/* Mask for selecting the type for typecheck purposes */
++#define AXP_OPERAND_TYPECHECK_MASK \
++ (AXP_OPERAND_PARENS | AXP_OPERAND_COMMA | AXP_OPERAND_IR | \
++ AXP_OPERAND_FPR | AXP_OPERAND_RELATIVE | AXP_OPERAND_SIGNED | \
++ AXP_OPERAND_UNSIGNED)
++
++/* This operand does not actually exist in the assembler input. This
++ is used to support extended mnemonics, for which two operands fields
++ are identical. The assembler should call the insert function with
++ any op value. The disassembler should call the extract function,
++ ignore the return value, and check the value placed in the invalid
++ argument. */
++#define AXP_OPERAND_FAKE 01
++
++/* The operand should be wrapped in parentheses rather than separated
++ from the previous by a comma. This is used for the load and store
++ instructions which want their operands to look like "Ra,disp(Rb)". */
++#define AXP_OPERAND_PARENS 02
++
++/* Used in combination with PARENS, this suppresses the suppression of
++ the comma. This is used for "jmp Ra,(Rb),hint". */
++#define AXP_OPERAND_COMMA 04
++
++/* This operand names an integer register. */
++#define AXP_OPERAND_IR 010
++
++/* This operand names a floating point register. */
++#define AXP_OPERAND_FPR 020
++
++/* This operand is a relative branch displacement. The disassembler
++ prints these symbolically if possible. */
++#define AXP_OPERAND_RELATIVE 040
++
++/* This operand takes signed values. */
++#define AXP_OPERAND_SIGNED 0100
++
++/* This operand takes unsigned values. This exists primarily so that
++ a flags value of 0 can be treated as end-of-arguments. */
++#define AXP_OPERAND_UNSIGNED 0200
++
++/* Suppress overflow detection on this field. This is used for hints. */
++#define AXP_OPERAND_NOOVERFLOW 0400
++
++/* Mask for optional argument default value. */
++#define AXP_OPERAND_OPTIONAL_MASK 07000
++
++/* This operand defaults to zero. This is used for jump hints. */
++#define AXP_OPERAND_DEFAULT_ZERO 01000
++
++/* This operand should default to the first (real) operand and is used
++ in conjunction with AXP_OPERAND_OPTIONAL. This allows
++ "and $0,3,$0" to be written as "and $0,3", etc. I don't like
++ it, but it's what DEC does. */
++#define AXP_OPERAND_DEFAULT_FIRST 02000
++
++/* Similarly, this operand should default to the second (real) operand.
++ This allows "negl $0" instead of "negl $0,$0". */
++#define AXP_OPERAND_DEFAULT_SECOND 04000
++
++#define AXP_OPERAND_DEFAULT_THIRD 8000
++
++
++/* Register common names */
++
++#define AXP_REG_V0 0
++#define AXP_REG_T0 1
++#define AXP_REG_T1 2
++#define AXP_REG_T2 3
++#define AXP_REG_T3 4
++#define AXP_REG_T4 5
++#define AXP_REG_T5 6
++#define AXP_REG_T6 7
++#define AXP_REG_T7 8
++#define AXP_REG_S0 9
++#define AXP_REG_S1 10
++#define AXP_REG_S2 11
++#define AXP_REG_S3 12
++#define AXP_REG_S4 13
++#define AXP_REG_S5 14
++#define AXP_REG_FP 15
++#define AXP_REG_A0 16
++#define AXP_REG_A1 17
++#define AXP_REG_A2 18
++#define AXP_REG_A3 19
++#define AXP_REG_A4 20
++#define AXP_REG_A5 21
++#define AXP_REG_T8 22
++#define AXP_REG_T9 23
++#define AXP_REG_T10 24
++#define AXP_REG_T11 25
++#define AXP_REG_RA 26
++#define AXP_REG_PV 27
++#define AXP_REG_T12 27
++#define AXP_REG_AT 28
++#define AXP_REG_GP 29
++#define AXP_REG_SP 30
++#define AXP_REG_ZERO 31
++
++#endif /* OPCODE_SW64_H */
diff --git a/gdb-14.1-add-support-for-SW64-005.patch b/gdb-14.1-add-support-for-SW64-005.patch
new file mode 100644
index 0000000000000000000000000000000000000000..73c8a2964066737f9143f9eae80e864be88875ff
--- /dev/null
+++ b/gdb-14.1-add-support-for-SW64-005.patch
@@ -0,0 +1,2785 @@
+diff -Naur gdb-14.1-after-patch/bfd/archures.c gdb-14.1-sw64/bfd/archures.c
+--- gdb-14.1-after-patch/bfd/archures.c 2023-12-03 13:23:54.000000000 +0800
++++ gdb-14.1-sw64/bfd/archures.c 2025-03-03 10:59:12.950000000 +0800
+@@ -303,6 +303,10 @@
+ .#define bfd_mach_alpha_ev4 0x10
+ .#define bfd_mach_alpha_ev5 0x20
+ .#define bfd_mach_alpha_ev6 0x30
++. bfd_arch_sw64, {* SW64 *}
++.#define bfd_mach_sw64 0x10
++.#define bfd_mach_sw64_sw6b 0x20
++.#define bfd_mach_sw64_sw8a 0x30
+ . bfd_arch_arm, {* Advanced Risc Machines ARM. *}
+ .#define bfd_mach_arm_unknown 0
+ .#define bfd_mach_arm_2 1
+@@ -629,6 +633,7 @@
+ */
+
+ extern const bfd_arch_info_type bfd_aarch64_arch;
++extern const bfd_arch_info_type bfd_sw64_arch;
+ extern const bfd_arch_info_type bfd_alpha_arch;
+ extern const bfd_arch_info_type bfd_amdgcn_arch;
+ extern const bfd_arch_info_type bfd_arc_arch;
+@@ -718,6 +723,7 @@
+ SELECT_ARCHITECTURES,
+ #else
+ &bfd_aarch64_arch,
++ &bfd_sw64_arch,
+ &bfd_alpha_arch,
+ &bfd_amdgcn_arch,
+ &bfd_arc_arch,
+diff -Naur gdb-14.1-after-patch/bfd/bfd-in2.h gdb-14.1-sw64/bfd/bfd-in2.h
+--- gdb-14.1-after-patch/bfd/bfd-in2.h 2023-12-03 13:23:54.000000000 +0800
++++ gdb-14.1-sw64/bfd/bfd-in2.h 2025-03-03 10:59:12.950000000 +0800
+@@ -1555,6 +1555,10 @@
+ #define bfd_mach_sh4a 0x4a
+ #define bfd_mach_sh4a_nofpu 0x4b
+ #define bfd_mach_sh4al_dsp 0x4d
++ bfd_arch_sw64, /* SW64 */
++#define bfd_mach_sw64 0x10
++#define bfd_mach_sw64_sw6b 0x20
++#define bfd_mach_sw64_sw8a 0x30
+ bfd_arch_alpha, /* Dec Alpha. */
+ #define bfd_mach_alpha_ev4 0x10
+ #define bfd_mach_alpha_ev5 0x20
+@@ -3564,6 +3568,108 @@
+ BFD_RELOC_ALPHA_TPREL_LO16,
+ BFD_RELOC_ALPHA_TPREL16,
+
++/* SW64 ECOFF and ELF relocations. Some of these treat the symbol or
++"addend" in some special way.
++For GPDISP_HI16 ("gpdisp") relocations, the symbol is ignored when
++writing; when reading, it will be the absolute section symbol. The
++addend is the displacement in bytes of the "lda" instruction from
++the "ldah" instruction (which is at the address of this reloc). */
++ BFD_RELOC_SW64_GPDISP_HI16,
++
++/* For GPDISP_LO16 ("ignore") relocations, the symbol is handled as
++with GPDISP_HI16 relocs. The addend is ignored when writing the
++relocations out, and is filled in with the file's GP value on
++reading, for convenience. */
++ BFD_RELOC_SW64_GPDISP_LO16,
++
++/* The ELF GPDISP relocation is exactly the same as the GPDISP_HI16
++relocation except that there is no accompanying GPDISP_LO16
++relocation. */
++ BFD_RELOC_SW64_GPDISP,
++
++/* The SW64 LITERAL/LITUSE relocs are produced by a symbol reference;
++the assembler turns it into a LDQ instruction to load the address of
++the symbol, and then fills in a register in the real instruction.
++
++The LITERAL reloc, at the LDQ instruction, refers to the .lita
++section symbol. The addend is ignored when writing, but is filled
++in with the file's GP value on reading, for convenience, as with the
++GPDISP_LO16 reloc.
++
++The ELF_LITERAL reloc is somewhere between 16_GOTOFF and GPDISP_LO16.
++It should refer to the symbol to be referenced, as with 16_GOTOFF,
++but it generates output not based on the position within the .got
++section, but relative to the GP value chosen for the file during the
++final link stage.
++
++The LITUSE reloc, on the instruction using the loaded address, gives
++information to the linker that it might be able to use to optimize
++away some literal section references. The symbol is ignored (read
++as the absolute section symbol), and the "addend" indicates the type
++of instruction using the register:
++1 - "memory" fmt insn
++2 - byte-manipulation (byte offset reg)
++3 - jsr (target of branch) */
++ BFD_RELOC_SW64_LITERAL,
++ BFD_RELOC_SW64_ELF_LITERAL,
++ BFD_RELOC_SW64_ELF_LITERAL_GOT,
++ BFD_RELOC_SW64_LITUSE,
++
++/* The HINT relocation indicates a value that should be filled into the
++"hint" field of a jmp/jsr/ret instruction, for possible branch-
++prediction logic which may be provided on some processors. */
++ BFD_RELOC_SW64_HINT,
++
++/* The LINKAGE relocation outputs a linkage pair in the object file,
++which is filled by the linker. */
++ BFD_RELOC_SW64_LINKAGE,
++
++/* The CODEADDR relocation outputs a STO_CA in the object file,
++which is filled by the linker. */
++ BFD_RELOC_SW64_CODEADDR,
++
++/* The GPREL_HI/LO relocations together form a 32-bit offset from the
++GP register. */
++ BFD_RELOC_SW64_GPREL_HI16,
++ BFD_RELOC_SW64_GPREL_LO16,
++
++/* Like BFD_RELOC_23_PCREL_S2, except that the source and target must
++share a common GP, and the target address is adjusted for
++STO_SW64_STD_GPLOAD. */
++ BFD_RELOC_SW64_BRSGP,
++
++/* The NOP relocation outputs a NOP if the longword displacement
++between two procedure entry points is < 2^21. */
++ BFD_RELOC_SW64_NOP,
++
++/* The BSR relocation outputs a BSR if the longword displacement
++between two procedure entry points is < 2^21. */
++ BFD_RELOC_SW64_BSR,
++
++/* The LDA relocation outputs a LDA if the longword displacement
++between two procedure entry points is < 2^16. */
++ BFD_RELOC_SW64_LDA,
++
++/* The BOH relocation outputs a BSR if the longword displacement
++between two procedure entry points is < 2^21, or else a hint. */
++ BFD_RELOC_SW64_BOH,
++
++/* SW64 thread-local storage relocations. */
++ BFD_RELOC_SW64_TLSGD,
++ BFD_RELOC_SW64_TLSLDM,
++ BFD_RELOC_SW64_DTPMOD64,
++ BFD_RELOC_SW64_GOTDTPREL16,
++ BFD_RELOC_SW64_DTPREL64,
++ BFD_RELOC_SW64_DTPREL_HI16,
++ BFD_RELOC_SW64_DTPREL_LO16,
++ BFD_RELOC_SW64_DTPREL16,
++ BFD_RELOC_SW64_GOTTPREL16,
++ BFD_RELOC_SW64_TPREL64,
++ BFD_RELOC_SW64_TPREL_HI16,
++ BFD_RELOC_SW64_TPREL_LO16,
++ BFD_RELOC_SW64_TPREL16,
++ BFD_RELOC_SW64_BR26,
++
+ /* The MIPS jump instruction. */
+ BFD_RELOC_MIPS_JMP,
+ BFD_RELOC_MICROMIPS_JMP,
+diff -Naur gdb-14.1-after-patch/bfd/coffcode.h gdb-14.1-sw64/bfd/coffcode.h
+--- gdb-14.1-after-patch/bfd/coffcode.h 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/bfd/coffcode.h 2025-03-03 10:59:12.960000000 +0800
+@@ -2235,6 +2235,12 @@
+ machine = internal_f->f_flags & F_LOONGARCH64_ARCHITECTURE_MASK;
+ break;
+ #endif
++#ifdef SW64MAGIC
++ case SW64MAGIC:
++ arch = bfd_arch_sw64;
++ machine = internal_f->f_flags & F_SW64_ARCHITECTURE_MASK;
++ break;
++#endif
+ #ifdef Z80MAGIC
+ case Z80MAGIC:
+ arch = bfd_arch_z80;
+@@ -2806,6 +2812,12 @@
+ return true;
+ #endif
+
++#ifdef SW64MAGIC
++ case bfd_arch_sw64:
++ * magicp = SW64MAGIC;
++ return true;
++#endif
++
+ #ifdef ARMMAGIC
+ case bfd_arch_arm:
+ #ifdef ARM_WINCE
+@@ -4060,6 +4072,11 @@
+ #define __A_MAGIC_SET__
+ internal_a.magic = ZMAGIC;
+ #endif
++
++#if defined(SW64)
++#define __A_MAGIC_SET__
++ internal_a.magic = ZMAGIC;
++#endif
+
+ #if defined MCORE_PE
+ #define __A_MAGIC_SET__
+diff -Naur gdb-14.1-after-patch/bfd/coff-sw64.c gdb-14.1-sw64/bfd/coff-sw64.c
+--- gdb-14.1-after-patch/bfd/coff-sw64.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/bfd/coff-sw64.c 2025-03-03 10:59:12.950000000 +0800
+@@ -0,0 +1,2482 @@
++/* BFD back-end for SW64 Extended-Coff files.
++ Copyright (C) 1993-2023 Free Software Foundation, Inc.
++ Modified from coff-mips.c by Steve Chamberlain and
++ Ian Lance Taylor .
++
++ This file is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
++ MA 02110-1301, USA. */
++
++#include "sysdep.h"
++#include "bfd.h"
++#include "bfdlink.h"
++#include "libbfd.h"
++#include "coff/internal.h"
++#include "coff/sym.h"
++#include "coff/symconst.h"
++#include "coff/ecoff.h"
++#include "coff/sw64.h"
++#include "aout/ar.h"
++#include "libcoff.h"
++#include "libecoff.h"
++
++/* Prototypes for static functions. */
++
++
++
++/* ECOFF has COFF sections, but the debugging information is stored in
++ a completely different format. ECOFF targets use some of the
++ swapping routines from coffswap.h, and some of the generic COFF
++ routines in coffgen.c, but, unlike the real COFF targets, do not
++ use coffcode.h itself.
++
++ Get the generic COFF swapping routines, except for the reloc,
++ symbol, and lineno ones. Give them ecoff names. Define some
++ accessor macros for the large sizes used for SW64 ECOFF. */
++
++#define GET_FILEHDR_SYMPTR H_GET_64
++#define PUT_FILEHDR_SYMPTR H_PUT_64
++#define GET_AOUTHDR_TSIZE H_GET_64
++#define PUT_AOUTHDR_TSIZE H_PUT_64
++#define GET_AOUTHDR_DSIZE H_GET_64
++#define PUT_AOUTHDR_DSIZE H_PUT_64
++#define GET_AOUTHDR_BSIZE H_GET_64
++#define PUT_AOUTHDR_BSIZE H_PUT_64
++#define GET_AOUTHDR_ENTRY H_GET_64
++#define PUT_AOUTHDR_ENTRY H_PUT_64
++#define GET_AOUTHDR_TEXT_START H_GET_64
++#define PUT_AOUTHDR_TEXT_START H_PUT_64
++#define GET_AOUTHDR_DATA_START H_GET_64
++#define PUT_AOUTHDR_DATA_START H_PUT_64
++#define GET_SCNHDR_PADDR H_GET_64
++#define PUT_SCNHDR_PADDR H_PUT_64
++#define GET_SCNHDR_VADDR H_GET_64
++#define PUT_SCNHDR_VADDR H_PUT_64
++#define GET_SCNHDR_SIZE H_GET_64
++#define PUT_SCNHDR_SIZE H_PUT_64
++#define GET_SCNHDR_SCNPTR H_GET_64
++#define PUT_SCNHDR_SCNPTR H_PUT_64
++#define GET_SCNHDR_RELPTR H_GET_64
++#define PUT_SCNHDR_RELPTR H_PUT_64
++#define GET_SCNHDR_LNNOPTR H_GET_64
++#define PUT_SCNHDR_LNNOPTR H_PUT_64
++
++#define SW64ECOFF
++
++#define NO_COFF_RELOCS
++#define NO_COFF_SYMBOLS
++#define NO_COFF_LINENOS
++#define coff_swap_filehdr_in sw64_ecoff_swap_filehdr_in
++#define coff_swap_filehdr_out sw64_ecoff_swap_filehdr_out
++#define coff_swap_aouthdr_in sw64_ecoff_swap_aouthdr_in
++#define coff_swap_aouthdr_out sw64_ecoff_swap_aouthdr_out
++#define coff_swap_scnhdr_in sw64_ecoff_swap_scnhdr_in
++#define coff_swap_scnhdr_out sw64_ecoff_swap_scnhdr_out
++#include "coffswap.h"
++
++/* Get the ECOFF swapping routines. */
++#define ECOFF_64
++#include "ecoffswap.h"
++
++/* How to process the various reloc types. */
++
++static bfd_reloc_status_type
++reloc_nil (bfd *abfd ATTRIBUTE_UNUSED,
++ arelent *reloc ATTRIBUTE_UNUSED,
++ asymbol *sym ATTRIBUTE_UNUSED,
++ void * data ATTRIBUTE_UNUSED,
++ asection *sec ATTRIBUTE_UNUSED,
++ bfd *output_bfd ATTRIBUTE_UNUSED,
++ char **error_message ATTRIBUTE_UNUSED)
++{
++ return bfd_reloc_ok;
++}
++
++/* In case we're on a 32-bit machine, construct a 64-bit "-1" value
++ from smaller values. Start with zero, widen, *then* decrement. */
++#define MINUS_ONE (((bfd_vma)0) - 1)
++
++static reloc_howto_type sw64_howto_table[] =
++{
++ /* Reloc type 0 is ignored by itself. However, it appears after a
++ GPDISP reloc to identify the location where the low order 16 bits
++ of the gp register are loaded. */
++ HOWTO (SW64_R_IGNORE, /* type */
++ 0, /* rightshift */
++ 1, /* size */
++ 8, /* bitsize */
++ true, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ reloc_nil, /* special_function */
++ "IGNORE", /* name */
++ true, /* partial_inplace */
++ 0, /* src_mask */
++ 0, /* dst_mask */
++ true), /* pcrel_offset */
++
++ /* A 32 bit reference to a symbol. */
++ HOWTO (SW64_R_REFLONG, /* type */
++ 0, /* rightshift */
++ 4, /* size */
++ 32, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_bitfield, /* complain_on_overflow */
++ 0, /* special_function */
++ "REFLONG", /* name */
++ true, /* partial_inplace */
++ 0xffffffff, /* src_mask */
++ 0xffffffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* A 64 bit reference to a symbol. */
++ HOWTO (SW64_R_REFQUAD, /* type */
++ 0, /* rightshift */
++ 8, /* size */
++ 64, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_bitfield, /* complain_on_overflow */
++ 0, /* special_function */
++ "REFQUAD", /* name */
++ true, /* partial_inplace */
++ MINUS_ONE, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* A 32 bit GP relative offset. This is just like REFLONG except
++ that when the value is used the value of the gp register will be
++ added in. */
++ HOWTO (SW64_R_GPREL32, /* type */
++ 0, /* rightshift */
++ 4, /* size */
++ 32, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_bitfield, /* complain_on_overflow */
++ 0, /* special_function */
++ "GPREL32", /* name */
++ true, /* partial_inplace */
++ 0xffffffff, /* src_mask */
++ 0xffffffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* Used for an instruction that refers to memory off the GP
++ register. The offset is 16 bits of the 32 bit instruction. This
++ reloc always seems to be against the .lita section. */
++ HOWTO (SW64_R_LITERAL, /* type */
++ 0, /* rightshift */
++ 4, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ 0, /* special_function */
++ "LITERAL", /* name */
++ true, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* This reloc only appears immediately following a LITERAL reloc.
++ It identifies a use of the literal. It seems that the linker can
++ use this to eliminate a portion of the .lita section. The symbol
++ index is special: 1 means the literal address is in the base
++ register of a memory format instruction; 2 means the literal
++ address is in the byte offset register of a byte-manipulation
++ instruction; 3 means the literal address is in the target
++ register of a jsr instruction. This does not actually do any
++ relocation. */
++ HOWTO (SW64_R_LITUSE, /* type */
++ 0, /* rightshift */
++ 4, /* size */
++ 32, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ reloc_nil, /* special_function */
++ "LITUSE", /* name */
++ false, /* partial_inplace */
++ 0, /* src_mask */
++ 0, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* Load the gp register. This is always used for a ldah instruction
++ which loads the upper 16 bits of the gp register. The next reloc
++ will be an IGNORE reloc which identifies the location of the lda
++ instruction which loads the lower 16 bits. The symbol index of
++ the GPDISP instruction appears to actually be the number of bytes
++ between the ldah and lda instructions. This gives two different
++ ways to determine where the lda instruction is; I don't know why
++ both are used. The value to use for the relocation is the
++ difference between the GP value and the current location; the
++ load will always be done against a register holding the current
++ address. */
++ HOWTO (SW64_R_GPDISP, /* type */
++ 16, /* rightshift */
++ 4, /* size */
++ 16, /* bitsize */
++ true, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ reloc_nil, /* special_function */
++ "GPDISP", /* name */
++ true, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ true), /* pcrel_offset */
++
++ /* A 21 bit branch. The native assembler generates these for
++ branches within the text segment, and also fills in the PC
++ relative offset in the instruction. */
++ HOWTO (SW64_R_BRADDR, /* type */
++ 2, /* rightshift */
++ 4, /* size */
++ 21, /* bitsize */
++ true, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ 0, /* special_function */
++ "BRADDR", /* name */
++ true, /* partial_inplace */
++ 0x1fffff, /* src_mask */
++ 0x1fffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* A hint for a jump to a register. */
++ HOWTO (SW64_R_HINT, /* type */
++ 2, /* rightshift */
++ 4, /* size */
++ 14, /* bitsize */
++ true, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ 0, /* special_function */
++ "HINT", /* name */
++ true, /* partial_inplace */
++ 0x3fff, /* src_mask */
++ 0x3fff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* 16 bit PC relative offset. */
++ HOWTO (SW64_R_SREL16, /* type */
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ true, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ 0, /* special_function */
++ "SREL16", /* name */
++ true, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* 32 bit PC relative offset. */
++ HOWTO (SW64_R_SREL32, /* type */
++ 0, /* rightshift */
++ 4, /* size */
++ 32, /* bitsize */
++ true, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ 0, /* special_function */
++ "SREL32", /* name */
++ true, /* partial_inplace */
++ 0xffffffff, /* src_mask */
++ 0xffffffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* A 64 bit PC relative offset. */
++ HOWTO (SW64_R_SREL64, /* type */
++ 0, /* rightshift */
++ 8, /* size */
++ 64, /* bitsize */
++ true, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ 0, /* special_function */
++ "SREL64", /* name */
++ true, /* partial_inplace */
++ MINUS_ONE, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* Push a value on the reloc evaluation stack. */
++ HOWTO (SW64_R_OP_PUSH, /* type */
++ 0, /* rightshift */
++ 0, /* size */
++ 0, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ 0, /* special_function */
++ "OP_PUSH", /* name */
++ false, /* partial_inplace */
++ 0, /* src_mask */
++ 0, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* Store the value from the stack at the given address. Store it in
++ a bitfield of size r_size starting at bit position r_offset. */
++ HOWTO (SW64_R_OP_STORE, /* type */
++ 0, /* rightshift */
++ 8, /* size */
++ 64, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ 0, /* special_function */
++ "OP_STORE", /* name */
++ false, /* partial_inplace */
++ 0, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* Subtract the reloc address from the value on the top of the
++ relocation stack. */
++ HOWTO (SW64_R_OP_PSUB, /* type */
++ 0, /* rightshift */
++ 0, /* size */
++ 0, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ 0, /* special_function */
++ "OP_PSUB", /* name */
++ false, /* partial_inplace */
++ 0, /* src_mask */
++ 0, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* Shift the value on the top of the relocation stack right by the
++ given value. */
++ HOWTO (SW64_R_OP_PRSHIFT, /* type */
++ 0, /* rightshift */
++ 0, /* size */
++ 0, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ 0, /* special_function */
++ "OP_PRSHIFT", /* name */
++ false, /* partial_inplace */
++ 0, /* src_mask */
++ 0, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* Adjust the GP value for a new range in the object file. */
++ HOWTO (SW64_R_GPVALUE, /* type */
++ 0, /* rightshift */
++ 0, /* size */
++ 0, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ 0, /* special_function */
++ "GPVALUE", /* name */
++ false, /* partial_inplace */
++ 0, /* src_mask */
++ 0, /* dst_mask */
++ false) /* pcrel_offset */
++};
++
++/* Recognize an SW64 ECOFF file. */
++
++static bfd_cleanup
++sw64_ecoff_object_p (bfd *abfd)
++{
++ bfd_cleanup ret;
++
++ ret = coff_object_p (abfd);
++
++ if (ret != NULL)
++ {
++ asection *sec;
++
++ /* SW64 ECOFF has a .pdata section. The lnnoptr field of the
++ .pdata section is the number of entries it contains. Each
++ entry takes up 8 bytes. The number of entries is required
++ since the section is aligned to a 16 byte boundary. When we
++ link .pdata sections together, we do not want to include the
++ alignment bytes. We handle this on input by faking the size
++ of the .pdata section to remove the unwanted alignment bytes.
++ On output we will set the lnnoptr field and force the
++ alignment. */
++ sec = bfd_get_section_by_name (abfd, _PDATA);
++ if (sec != (asection *) NULL)
++ {
++ bfd_size_type size;
++
++ size = (bfd_size_type) sec->line_filepos * 8;
++ BFD_ASSERT (size == sec->size
++ || size + 8 == sec->size);
++ if (!bfd_set_section_size (sec, size))
++ return NULL;
++ }
++ }
++
++ return ret;
++}
++
++/* See whether the magic number matches. */
++
++static bool
++sw64_ecoff_bad_format_hook (bfd *abfd ATTRIBUTE_UNUSED,
++ void * filehdr)
++{
++ struct internal_filehdr *internal_f = (struct internal_filehdr *) filehdr;
++
++ if (! SW64_ECOFF_BADMAG (*internal_f))
++ return true;
++
++ if (SW64_ECOFF_COMPRESSEDMAG (*internal_f))
++ _bfd_error_handler
++ (_("%pB: cannot handle compressed SW64 binaries; "
++ "use compiler flags, or objZ, to generate uncompressed binaries"),
++ abfd);
++
++ return false;
++}
++
++/* This is a hook called by coff_real_object_p to create any backend
++ specific information. */
++
++static void *
++sw64_ecoff_mkobject_hook (bfd *abfd, void * filehdr, void * aouthdr)
++{
++ void * ecoff;
++
++ ecoff = _bfd_ecoff_mkobject_hook (abfd, filehdr, aouthdr);
++
++ if (ecoff != NULL)
++ {
++ struct internal_filehdr *internal_f = (struct internal_filehdr *) filehdr;
++
++ /* Set additional BFD flags according to the object type from the
++ machine specific file header flags. */
++ switch (internal_f->f_flags & F_SW64_OBJECT_TYPE_MASK)
++ {
++ case F_SW64_SHARABLE:
++ abfd->flags |= DYNAMIC;
++ break;
++ case F_SW64_CALL_SHARED:
++ /* Always executable if using shared libraries as the run time
++ loader might resolve undefined references. */
++ abfd->flags |= (DYNAMIC | EXEC_P);
++ break;
++ }
++ }
++ return ecoff;
++}
++
++/* Reloc handling. */
++
++/* Swap a reloc in. */
++
++static void
++sw64_ecoff_swap_reloc_in (bfd *abfd,
++ void * ext_ptr,
++ struct internal_reloc *intern)
++{
++ const RELOC *ext = (RELOC *) ext_ptr;
++
++ intern->r_vaddr = H_GET_64 (abfd, ext->r_vaddr);
++ intern->r_symndx = H_GET_32 (abfd, ext->r_symndx);
++
++ BFD_ASSERT (bfd_header_little_endian (abfd));
++
++ intern->r_type = ((ext->r_bits[0] & RELOC_BITS0_TYPE_LITTLE)
++ >> RELOC_BITS0_TYPE_SH_LITTLE);
++ intern->r_extern = (ext->r_bits[1] & RELOC_BITS1_EXTERN_LITTLE) != 0;
++ intern->r_offset = ((ext->r_bits[1] & RELOC_BITS1_OFFSET_LITTLE)
++ >> RELOC_BITS1_OFFSET_SH_LITTLE);
++ /* Ignore the reserved bits. */
++ intern->r_size = ((ext->r_bits[3] & RELOC_BITS3_SIZE_LITTLE)
++ >> RELOC_BITS3_SIZE_SH_LITTLE);
++
++ if (intern->r_type == SW64_R_LITUSE
++ || intern->r_type == SW64_R_GPDISP)
++ {
++ /* Handle the LITUSE and GPDISP relocs specially. Its symndx
++ value is not actually a symbol index, but is instead a
++ special code. We put the code in the r_size field, and
++ clobber the symndx. */
++ if (intern->r_size != 0)
++ abort ();
++ intern->r_size = intern->r_symndx;
++ intern->r_symndx = RELOC_SECTION_NONE;
++ }
++ else if (intern->r_type == SW64_R_IGNORE)
++ {
++ /* The IGNORE reloc generally follows a GPDISP reloc, and is
++ against the .lita section. The section is irrelevant. */
++ if (! intern->r_extern &&
++ intern->r_symndx == RELOC_SECTION_ABS)
++ abort ();
++ if (! intern->r_extern && intern->r_symndx == RELOC_SECTION_LITA)
++ intern->r_symndx = RELOC_SECTION_ABS;
++ }
++}
++
++/* Swap a reloc out. */
++
++static void
++sw64_ecoff_swap_reloc_out (bfd *abfd,
++ const struct internal_reloc *intern,
++ void * dst)
++{
++ RELOC *ext = (RELOC *) dst;
++ long symndx;
++ unsigned char size;
++
++ /* Undo the hackery done in swap_reloc_in. */
++ if (intern->r_type == SW64_R_LITUSE
++ || intern->r_type == SW64_R_GPDISP)
++ {
++ symndx = intern->r_size;
++ size = 0;
++ }
++ else if (intern->r_type == SW64_R_IGNORE
++ && ! intern->r_extern
++ && intern->r_symndx == RELOC_SECTION_ABS)
++ {
++ symndx = RELOC_SECTION_LITA;
++ size = intern->r_size;
++ }
++ else
++ {
++ symndx = intern->r_symndx;
++ size = intern->r_size;
++ }
++
++ /* XXX FIXME: The maximum symndx value used to be 14 but this
++ fails with object files produced by DEC's C++ compiler.
++ Where does the value 14 (or 15) come from anyway ? */
++ BFD_ASSERT (intern->r_extern
++ || (intern->r_symndx >= 0 && intern->r_symndx <= 15));
++
++ H_PUT_64 (abfd, intern->r_vaddr, ext->r_vaddr);
++ H_PUT_32 (abfd, symndx, ext->r_symndx);
++
++ BFD_ASSERT (bfd_header_little_endian (abfd));
++
++ ext->r_bits[0] = ((intern->r_type << RELOC_BITS0_TYPE_SH_LITTLE)
++ & RELOC_BITS0_TYPE_LITTLE);
++ ext->r_bits[1] = ((intern->r_extern ? RELOC_BITS1_EXTERN_LITTLE : 0)
++ | ((intern->r_offset << RELOC_BITS1_OFFSET_SH_LITTLE)
++ & RELOC_BITS1_OFFSET_LITTLE));
++ ext->r_bits[2] = 0;
++ ext->r_bits[3] = ((size << RELOC_BITS3_SIZE_SH_LITTLE)
++ & RELOC_BITS3_SIZE_LITTLE);
++}
++
++/* Finish canonicalizing a reloc. Part of this is generic to all
++ ECOFF targets, and that part is in ecoff.c. The rest is done in
++ this backend routine. It must fill in the howto field. */
++
++static void
++sw64_adjust_reloc_in (bfd *abfd,
++ const struct internal_reloc *intern,
++ arelent *rptr)
++{
++ if (intern->r_type > SW64_R_GPVALUE)
++ {
++ /* xgettext:c-format */
++ _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
++ abfd, intern->r_type);
++ bfd_set_error (bfd_error_bad_value);
++ rptr->addend = 0;
++ rptr->howto = NULL;
++ return;
++ }
++
++ switch (intern->r_type)
++ {
++ case SW64_R_BRADDR:
++ case SW64_R_SREL16:
++ case SW64_R_SREL32:
++ case SW64_R_SREL64:
++ /* These relocs appear to be fully resolved when they are against
++ internal symbols. Against external symbols, BRADDR at least
++ appears to be resolved against the next instruction. */
++ if (! intern->r_extern)
++ rptr->addend = 0;
++ else
++ rptr->addend = - (intern->r_vaddr + 4);
++ break;
++
++ case SW64_R_GPREL32:
++ case SW64_R_LITERAL:
++ /* Copy the gp value for this object file into the addend, to
++ ensure that we are not confused by the linker. */
++ if (! intern->r_extern)
++ rptr->addend += ecoff_data (abfd)->gp;
++ break;
++
++ case SW64_R_LITUSE:
++ case SW64_R_GPDISP:
++ /* The LITUSE and GPDISP relocs do not use a symbol, or an
++ addend, but they do use a special code. Put this code in the
++ addend field. */
++ rptr->addend = intern->r_size;
++ break;
++
++ case SW64_R_OP_STORE:
++ /* The STORE reloc needs the size and offset fields. We store
++ them in the addend. */
++ BFD_ASSERT (intern->r_offset <= 256);
++ rptr->addend = (intern->r_offset << 8) + intern->r_size;
++ break;
++
++ case SW64_R_OP_PUSH:
++ case SW64_R_OP_PSUB:
++ case SW64_R_OP_PRSHIFT:
++ /* The PUSH, PSUB and PRSHIFT relocs do not actually use an
++ address. I believe that the address supplied is really an
++ addend. */
++ rptr->addend = intern->r_vaddr;
++ break;
++
++ case SW64_R_GPVALUE:
++ /* Set the addend field to the new GP value. */
++ rptr->addend = intern->r_symndx + ecoff_data (abfd)->gp;
++ break;
++
++ case SW64_R_IGNORE:
++ /* If the type is SW64_R_IGNORE, make sure this is a reference
++ to the absolute section so that the reloc is ignored. For
++ some reason the address of this reloc type is not adjusted by
++ the section vma. We record the gp value for this object file
++ here, for convenience when doing the GPDISP relocation. */
++ rptr->sym_ptr_ptr = bfd_abs_section_ptr->symbol_ptr_ptr;
++ rptr->address = intern->r_vaddr;
++ rptr->addend = ecoff_data (abfd)->gp;
++ break;
++
++ default:
++ break;
++ }
++
++ rptr->howto = &sw64_howto_table[intern->r_type];
++}
++
++/* When writing out a reloc we need to pull some values back out of
++ the addend field into the reloc. This is roughly the reverse of
++ sw64_adjust_reloc_in, except that there are several changes we do
++ not need to undo. */
++
++static void
++sw64_adjust_reloc_out (bfd *abfd ATTRIBUTE_UNUSED,
++ const arelent *rel,
++ struct internal_reloc *intern)
++{
++ switch (intern->r_type)
++ {
++ case SW64_R_LITUSE:
++ case SW64_R_GPDISP:
++ intern->r_size = rel->addend;
++ break;
++
++ case SW64_R_OP_STORE:
++ intern->r_size = rel->addend & 0xff;
++ intern->r_offset = (rel->addend >> 8) & 0xff;
++ break;
++
++ case SW64_R_OP_PUSH:
++ case SW64_R_OP_PSUB:
++ case SW64_R_OP_PRSHIFT:
++ intern->r_vaddr = rel->addend;
++ break;
++
++ case SW64_R_IGNORE:
++ intern->r_vaddr = rel->address;
++ break;
++
++ default:
++ break;
++ }
++}
++
++/* The size of the stack for the relocation evaluator. */
++#define RELOC_STACKSIZE (10)
++
++/* SW64 ECOFF relocs have a built in expression evaluator as well as
++ other interdependencies. Rather than use a bunch of special
++ functions and global variables, we use a single routine to do all
++ the relocation for a section. I haven't yet worked out how the
++ assembler is going to handle this. */
++
++static bfd_byte *
++sw64_ecoff_get_relocated_section_contents (bfd *abfd,
++ struct bfd_link_info *link_info,
++ struct bfd_link_order *link_order,
++ bfd_byte *data,
++ bool relocatable,
++ asymbol **symbols)
++{
++ bfd *input_bfd = link_order->u.indirect.section->owner;
++ asection *input_section = link_order->u.indirect.section;
++ long reloc_size;
++ arelent **reloc_vector;
++ long reloc_count;
++ bfd *output_bfd = relocatable ? abfd : (bfd *) NULL;
++ bfd_vma gp;
++ bool gp_undefined;
++ bfd_vma stack[RELOC_STACKSIZE];
++ int tos = 0;
++
++ reloc_size = bfd_get_reloc_upper_bound (input_bfd, input_section);
++ if (reloc_size < 0)
++ return NULL;
++
++ bfd_byte *orig_data = data;
++ if (!bfd_get_full_section_contents (input_bfd, input_section, &data))
++ return NULL;
++
++ if (data == NULL)
++ return NULL;
++
++ if (reloc_size == 0)
++ return data;
++
++ reloc_vector = (arelent **) bfd_malloc (reloc_size);
++ if (reloc_vector == NULL)
++ goto error_return;
++
++ reloc_count = bfd_canonicalize_reloc (input_bfd, input_section,
++ reloc_vector, symbols);
++ if (reloc_count < 0)
++ goto error_return;
++ if (reloc_count == 0)
++ goto successful_return;
++
++ /* Get the GP value for the output BFD. */
++ gp_undefined = false;
++ gp = _bfd_get_gp_value (abfd);
++ if (gp == 0)
++ {
++ if (relocatable)
++ {
++ asection *sec;
++ bfd_vma lo;
++
++ /* Make up a value. */
++ lo = (bfd_vma) -1;
++ for (sec = abfd->sections; sec != NULL; sec = sec->next)
++ {
++ if (sec->vma < lo
++ && (strcmp (sec->name, ".sbss") == 0
++ || strcmp (sec->name, ".sdata") == 0
++ || strcmp (sec->name, ".lit4") == 0
++ || strcmp (sec->name, ".lit8") == 0
++ || strcmp (sec->name, ".lita") == 0))
++ lo = sec->vma;
++ }
++ gp = lo + 0x8000;
++ _bfd_set_gp_value (abfd, gp);
++ }
++ else
++ {
++ struct bfd_link_hash_entry *h;
++
++ h = bfd_link_hash_lookup (link_info->hash, "_gp", false, false,
++ true);
++ if (h == (struct bfd_link_hash_entry *) NULL
++ || h->type != bfd_link_hash_defined)
++ gp_undefined = true;
++ else
++ {
++ gp = (h->u.def.value
++ + h->u.def.section->output_section->vma
++ + h->u.def.section->output_offset);
++ _bfd_set_gp_value (abfd, gp);
++ }
++ }
++ }
++
++ for (; *reloc_vector != (arelent *) NULL; reloc_vector++)
++ {
++ arelent *rel;
++ bfd_reloc_status_type r;
++ char *err;
++
++ rel = *reloc_vector;
++ r = bfd_reloc_ok;
++ switch (rel->howto->type)
++ {
++ case SW64_R_IGNORE:
++ rel->address += input_section->output_offset;
++ break;
++
++ case SW64_R_REFLONG:
++ case SW64_R_REFQUAD:
++ case SW64_R_BRADDR:
++ case SW64_R_HINT:
++ case SW64_R_SREL16:
++ case SW64_R_SREL32:
++ case SW64_R_SREL64:
++ if (relocatable
++ && ((*rel->sym_ptr_ptr)->flags & BSF_SECTION_SYM) == 0)
++ {
++ rel->address += input_section->output_offset;
++ break;
++ }
++ r = bfd_perform_relocation (input_bfd, rel, data, input_section,
++ output_bfd, &err);
++ break;
++
++ case SW64_R_GPREL32:
++ /* This relocation is used in a switch table. It is a 32
++ bit offset from the current GP value. We must adjust it
++ by the different between the original GP value and the
++ current GP value. The original GP value is stored in the
++ addend. We adjust the addend and let
++ bfd_perform_relocation finish the job. */
++ rel->addend -= gp;
++ r = bfd_perform_relocation (input_bfd, rel, data, input_section,
++ output_bfd, &err);
++ if (r == bfd_reloc_ok && gp_undefined)
++ {
++ r = bfd_reloc_dangerous;
++ err = (char *) _("GP relative relocation used when GP not defined");
++ }
++ break;
++
++ case SW64_R_LITERAL:
++ /* This is a reference to a literal value, generally
++ (always?) in the .lita section. This is a 16 bit GP
++ relative relocation. Sometimes the subsequent reloc is a
++ LITUSE reloc, which indicates how this reloc is used.
++ This sometimes permits rewriting the two instructions
++ referred to by the LITERAL and the LITUSE into different
++ instructions which do not refer to .lita. This can save
++ a memory reference, and permits removing a value from
++ .lita thus saving GP relative space.
++
++ We do not these optimizations. To do them we would need
++ to arrange to link the .lita section first, so that by
++ the time we got here we would know the final values to
++ use. This would not be particularly difficult, but it is
++ not currently implemented. */
++
++ {
++ unsigned long insn;
++
++ /* I believe that the LITERAL reloc will only apply to a
++ ldq or ldl instruction, so check my assumption. */
++ insn = bfd_get_32 (input_bfd, data + rel->address);
++ BFD_ASSERT (((insn >> 26) & 0x3f) == 0x29
++ || ((insn >> 26) & 0x3f) == 0x28);
++
++ rel->addend -= gp;
++ r = bfd_perform_relocation (input_bfd, rel, data, input_section,
++ output_bfd, &err);
++ if (r == bfd_reloc_ok && gp_undefined)
++ {
++ r = bfd_reloc_dangerous;
++ err =
++ (char *) _("GP relative relocation used when GP not defined");
++ }
++ }
++ break;
++
++ case SW64_R_LITUSE:
++ /* See SW64_R_LITERAL above for the uses of this reloc. It
++ does not cause anything to happen, itself. */
++ rel->address += input_section->output_offset;
++ break;
++
++ case SW64_R_GPDISP:
++ /* This marks the ldah of an ldah/lda pair which loads the
++ gp register with the difference of the gp value and the
++ current location. The second of the pair is r_size bytes
++ ahead; it used to be marked with an SW64_R_IGNORE reloc,
++ but that no longer happens in OSF/1 3.2. */
++ {
++ unsigned long insn1, insn2;
++ bfd_vma addend;
++
++ /* Get the two instructions. */
++ insn1 = bfd_get_32 (input_bfd, data + rel->address);
++ insn2 = bfd_get_32 (input_bfd, data + rel->address + rel->addend);
++
++ BFD_ASSERT (((insn1 >> 26) & 0x3f) == 0x09); /* ldah */
++ BFD_ASSERT (((insn2 >> 26) & 0x3f) == 0x08); /* lda */
++
++ /* Get the existing addend. We must account for the sign
++ extension done by lda and ldah. */
++ addend = ((insn1 & 0xffff) << 16) + (insn2 & 0xffff);
++ if (insn1 & 0x8000)
++ {
++ addend -= 0x80000000;
++ addend -= 0x80000000;
++ }
++ if (insn2 & 0x8000)
++ addend -= 0x10000;
++
++ /* The existing addend includes the different between the
++ gp of the input BFD and the address in the input BFD.
++ Subtract this out. */
++ addend -= (ecoff_data (input_bfd)->gp
++ - (input_section->vma + rel->address));
++
++ /* Now add in the final gp value, and subtract out the
++ final address. */
++ addend += (gp
++ - (input_section->output_section->vma
++ + input_section->output_offset
++ + rel->address));
++
++ /* Change the instructions, accounting for the sign
++ extension, and write them out. */
++ if (addend & 0x8000)
++ addend += 0x10000;
++ insn1 = (insn1 & 0xffff0000) | ((addend >> 16) & 0xffff);
++ insn2 = (insn2 & 0xffff0000) | (addend & 0xffff);
++
++ bfd_put_32 (input_bfd, (bfd_vma) insn1, data + rel->address);
++ bfd_put_32 (input_bfd, (bfd_vma) insn2,
++ data + rel->address + rel->addend);
++
++ rel->address += input_section->output_offset;
++ }
++ break;
++
++ case SW64_R_OP_PUSH:
++ /* Push a value on the reloc evaluation stack. */
++ {
++ asymbol *symbol;
++ bfd_vma relocation;
++
++ if (relocatable)
++ {
++ rel->address += input_section->output_offset;
++ break;
++ }
++
++ /* Figure out the relocation of this symbol. */
++ symbol = *rel->sym_ptr_ptr;
++
++ if (bfd_is_und_section (symbol->section))
++ r = bfd_reloc_undefined;
++
++ if (bfd_is_com_section (symbol->section))
++ relocation = 0;
++ else
++ relocation = symbol->value;
++ relocation += symbol->section->output_section->vma;
++ relocation += symbol->section->output_offset;
++ relocation += rel->addend;
++
++ if (tos >= RELOC_STACKSIZE)
++ abort ();
++
++ stack[tos++] = relocation;
++ }
++ break;
++
++ case SW64_R_OP_STORE:
++ /* Store a value from the reloc stack into a bitfield. */
++ {
++ bfd_vma val;
++ int offset, size;
++
++ if (relocatable)
++ {
++ rel->address += input_section->output_offset;
++ break;
++ }
++
++ if (tos == 0)
++ abort ();
++
++ /* The offset and size for this reloc are encoded into the
++ addend field by sw64_adjust_reloc_in. */
++ offset = (rel->addend >> 8) & 0xff;
++ size = rel->addend & 0xff;
++
++ val = bfd_get_64 (abfd, data + rel->address);
++ val &=~ (((1 << size) - 1) << offset);
++ val |= (stack[--tos] & ((1 << size) - 1)) << offset;
++ bfd_put_64 (abfd, val, data + rel->address);
++ }
++ break;
++
++ case SW64_R_OP_PSUB:
++ /* Subtract a value from the top of the stack. */
++ {
++ asymbol *symbol;
++ bfd_vma relocation;
++
++ if (relocatable)
++ {
++ rel->address += input_section->output_offset;
++ break;
++ }
++
++ /* Figure out the relocation of this symbol. */
++ symbol = *rel->sym_ptr_ptr;
++
++ if (bfd_is_und_section (symbol->section))
++ r = bfd_reloc_undefined;
++
++ if (bfd_is_com_section (symbol->section))
++ relocation = 0;
++ else
++ relocation = symbol->value;
++ relocation += symbol->section->output_section->vma;
++ relocation += symbol->section->output_offset;
++ relocation += rel->addend;
++
++ if (tos == 0)
++ abort ();
++
++ stack[tos - 1] -= relocation;
++ }
++ break;
++
++ case SW64_R_OP_PRSHIFT:
++ /* Shift the value on the top of the stack. */
++ {
++ asymbol *symbol;
++ bfd_vma relocation;
++
++ if (relocatable)
++ {
++ rel->address += input_section->output_offset;
++ break;
++ }
++
++ /* Figure out the relocation of this symbol. */
++ symbol = *rel->sym_ptr_ptr;
++
++ if (bfd_is_und_section (symbol->section))
++ r = bfd_reloc_undefined;
++
++ if (bfd_is_com_section (symbol->section))
++ relocation = 0;
++ else
++ relocation = symbol->value;
++ relocation += symbol->section->output_section->vma;
++ relocation += symbol->section->output_offset;
++ relocation += rel->addend;
++
++ if (tos == 0)
++ abort ();
++
++ stack[tos - 1] >>= relocation;
++ }
++ break;
++
++ case SW64_R_GPVALUE:
++ /* I really don't know if this does the right thing. */
++ gp = rel->addend;
++ gp_undefined = false;
++ break;
++
++ default:
++ abort ();
++ }
++
++ if (relocatable)
++ {
++ asection *os = input_section->output_section;
++
++ /* A partial link, so keep the relocs. */
++ os->orelocation[os->reloc_count] = rel;
++ os->reloc_count++;
++ }
++
++ if (r != bfd_reloc_ok)
++ {
++ switch (r)
++ {
++ case bfd_reloc_undefined:
++ (*link_info->callbacks->undefined_symbol)
++ (link_info, bfd_asymbol_name (*rel->sym_ptr_ptr),
++ input_bfd, input_section, rel->address, true);
++ break;
++ case bfd_reloc_dangerous:
++ (*link_info->callbacks->reloc_dangerous)
++ (link_info, err, input_bfd, input_section, rel->address);
++ break;
++ case bfd_reloc_overflow:
++ (*link_info->callbacks->reloc_overflow)
++ (link_info, NULL, bfd_asymbol_name (*rel->sym_ptr_ptr),
++ rel->howto->name, rel->addend, input_bfd,
++ input_section, rel->address);
++ break;
++ case bfd_reloc_outofrange:
++ default:
++ abort ();
++ break;
++ }
++ }
++ }
++
++ if (tos != 0)
++ abort ();
++
++ successful_return:
++ free (reloc_vector);
++ return data;
++
++ error_return:
++ free (reloc_vector);
++ if (orig_data == NULL)
++ free (data);
++ return NULL;
++}
++
++/* Get the howto structure for a generic reloc type. */
++
++static reloc_howto_type *
++sw64_bfd_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
++ bfd_reloc_code_real_type code)
++{
++ int sw64_type;
++
++ switch (code)
++ {
++ case BFD_RELOC_32:
++ sw64_type = SW64_R_REFLONG;
++ break;
++ case BFD_RELOC_64:
++ case BFD_RELOC_CTOR:
++ sw64_type = SW64_R_REFQUAD;
++ break;
++ case BFD_RELOC_GPREL32:
++ sw64_type = SW64_R_GPREL32;
++ break;
++ case BFD_RELOC_SW64_LITERAL:
++ sw64_type = SW64_R_LITERAL;
++ break;
++ case BFD_RELOC_SW64_LITUSE:
++ sw64_type = SW64_R_LITUSE;
++ break;
++ case BFD_RELOC_SW64_GPDISP_HI16:
++ sw64_type = SW64_R_GPDISP;
++ break;
++ case BFD_RELOC_SW64_GPDISP_LO16:
++ sw64_type = SW64_R_IGNORE;
++ break;
++ case BFD_RELOC_23_PCREL_S2:
++ sw64_type = SW64_R_BRADDR;
++ break;
++ case BFD_RELOC_SW64_HINT:
++ sw64_type = SW64_R_HINT;
++ break;
++ case BFD_RELOC_16_PCREL:
++ sw64_type = SW64_R_SREL16;
++ break;
++ case BFD_RELOC_32_PCREL:
++ sw64_type = SW64_R_SREL32;
++ break;
++ case BFD_RELOC_64_PCREL:
++ sw64_type = SW64_R_SREL64;
++ break;
++ default:
++ return (reloc_howto_type *) NULL;
++ }
++
++ return &sw64_howto_table[sw64_type];
++}
++
++static reloc_howto_type *
++sw64_bfd_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
++ const char *r_name)
++{
++ unsigned int i;
++
++ for (i = 0;
++ i < sizeof (sw64_howto_table) / sizeof (sw64_howto_table[0]);
++ i++)
++ if (sw64_howto_table[i].name != NULL
++ && strcasecmp (sw64_howto_table[i].name, r_name) == 0)
++ return &sw64_howto_table[i];
++
++ return NULL;
++}
++
++/* A helper routine for sw64_relocate_section which converts an
++ external reloc when generating relocatable output. Returns the
++ relocation amount. */
++
++static bfd_vma
++sw64_convert_external_reloc (bfd *output_bfd ATTRIBUTE_UNUSED,
++ struct bfd_link_info *info,
++ bfd *input_bfd,
++ struct external_reloc *ext_rel,
++ struct ecoff_link_hash_entry *h)
++{
++ unsigned long r_symndx;
++ bfd_vma relocation;
++
++ BFD_ASSERT (bfd_link_relocatable (info));
++
++ if (h->root.type == bfd_link_hash_defined
++ || h->root.type == bfd_link_hash_defweak)
++ {
++ asection *hsec;
++ const char *name;
++
++ /* This symbol is defined in the output. Convert the reloc from
++ being against the symbol to being against the section. */
++
++ /* Clear the r_extern bit. */
++ ext_rel->r_bits[1] &=~ RELOC_BITS1_EXTERN_LITTLE;
++
++ /* Compute a new r_symndx value. */
++ hsec = h->root.u.def.section;
++ name = bfd_section_name (hsec->output_section);
++
++ r_symndx = (unsigned long) -1;
++ switch (name[1])
++ {
++ case 'A':
++ if (strcmp (name, "*ABS*") == 0)
++ r_symndx = RELOC_SECTION_ABS;
++ break;
++ case 'b':
++ if (strcmp (name, ".bss") == 0)
++ r_symndx = RELOC_SECTION_BSS;
++ break;
++ case 'd':
++ if (strcmp (name, ".data") == 0)
++ r_symndx = RELOC_SECTION_DATA;
++ break;
++ case 'f':
++ if (strcmp (name, ".fini") == 0)
++ r_symndx = RELOC_SECTION_FINI;
++ break;
++ case 'i':
++ if (strcmp (name, ".init") == 0)
++ r_symndx = RELOC_SECTION_INIT;
++ break;
++ case 'l':
++ if (strcmp (name, ".lita") == 0)
++ r_symndx = RELOC_SECTION_LITA;
++ else if (strcmp (name, ".lit8") == 0)
++ r_symndx = RELOC_SECTION_LIT8;
++ else if (strcmp (name, ".lit4") == 0)
++ r_symndx = RELOC_SECTION_LIT4;
++ break;
++ case 'p':
++ if (strcmp (name, ".pdata") == 0)
++ r_symndx = RELOC_SECTION_PDATA;
++ break;
++ case 'r':
++ if (strcmp (name, ".rdata") == 0)
++ r_symndx = RELOC_SECTION_RDATA;
++ else if (strcmp (name, ".rconst") == 0)
++ r_symndx = RELOC_SECTION_RCONST;
++ break;
++ case 's':
++ if (strcmp (name, ".sdata") == 0)
++ r_symndx = RELOC_SECTION_SDATA;
++ else if (strcmp (name, ".sbss") == 0)
++ r_symndx = RELOC_SECTION_SBSS;
++ break;
++ case 't':
++ if (strcmp (name, ".text") == 0)
++ r_symndx = RELOC_SECTION_TEXT;
++ break;
++ case 'x':
++ if (strcmp (name, ".xdata") == 0)
++ r_symndx = RELOC_SECTION_XDATA;
++ break;
++ }
++
++ if (r_symndx == (unsigned long) -1)
++ abort ();
++
++ /* Add the section VMA and the symbol value. */
++ relocation = (h->root.u.def.value
++ + hsec->output_section->vma
++ + hsec->output_offset);
++ }
++ else
++ {
++ /* Change the symndx value to the right one for
++ the output BFD. */
++ r_symndx = h->indx;
++ if (r_symndx == (unsigned long) -1)
++ {
++ /* Caller must give an error. */
++ r_symndx = 0;
++ }
++ relocation = 0;
++ }
++
++ /* Write out the new r_symndx value. */
++ H_PUT_32 (input_bfd, r_symndx, ext_rel->r_symndx);
++
++ return relocation;
++}
++
++/* Relocate a section while linking an SW64 ECOFF file. This is
++ quite similar to get_relocated_section_contents. Perhaps they
++ could be combined somehow. */
++
++static bool
++sw64_relocate_section (bfd *output_bfd,
++ struct bfd_link_info *info,
++ bfd *input_bfd,
++ asection *input_section,
++ bfd_byte *contents,
++ void * external_relocs)
++{
++ asection **symndx_to_section, *lita_sec;
++ struct ecoff_link_hash_entry **sym_hashes;
++ bfd_vma gp;
++ bool gp_undefined;
++ bfd_vma stack[RELOC_STACKSIZE];
++ int tos = 0;
++ struct external_reloc *ext_rel;
++ struct external_reloc *ext_rel_end;
++ bfd_size_type amt;
++
++ /* We keep a table mapping the symndx found in an internal reloc to
++ the appropriate section. This is faster than looking up the
++ section by name each time. */
++ symndx_to_section = ecoff_data (input_bfd)->symndx_to_section;
++ if (symndx_to_section == (asection **) NULL)
++ {
++ amt = NUM_RELOC_SECTIONS * sizeof (asection *);
++ symndx_to_section = (asection **) bfd_alloc (input_bfd, amt);
++ if (!symndx_to_section)
++ return false;
++
++ symndx_to_section[RELOC_SECTION_NONE] = NULL;
++ symndx_to_section[RELOC_SECTION_TEXT] =
++ bfd_get_section_by_name (input_bfd, ".text");
++ symndx_to_section[RELOC_SECTION_RDATA] =
++ bfd_get_section_by_name (input_bfd, ".rdata");
++ symndx_to_section[RELOC_SECTION_DATA] =
++ bfd_get_section_by_name (input_bfd, ".data");
++ symndx_to_section[RELOC_SECTION_SDATA] =
++ bfd_get_section_by_name (input_bfd, ".sdata");
++ symndx_to_section[RELOC_SECTION_SBSS] =
++ bfd_get_section_by_name (input_bfd, ".sbss");
++ symndx_to_section[RELOC_SECTION_BSS] =
++ bfd_get_section_by_name (input_bfd, ".bss");
++ symndx_to_section[RELOC_SECTION_INIT] =
++ bfd_get_section_by_name (input_bfd, ".init");
++ symndx_to_section[RELOC_SECTION_LIT8] =
++ bfd_get_section_by_name (input_bfd, ".lit8");
++ symndx_to_section[RELOC_SECTION_LIT4] =
++ bfd_get_section_by_name (input_bfd, ".lit4");
++ symndx_to_section[RELOC_SECTION_XDATA] =
++ bfd_get_section_by_name (input_bfd, ".xdata");
++ symndx_to_section[RELOC_SECTION_PDATA] =
++ bfd_get_section_by_name (input_bfd, ".pdata");
++ symndx_to_section[RELOC_SECTION_FINI] =
++ bfd_get_section_by_name (input_bfd, ".fini");
++ symndx_to_section[RELOC_SECTION_LITA] =
++ bfd_get_section_by_name (input_bfd, ".lita");
++ symndx_to_section[RELOC_SECTION_ABS] = bfd_abs_section_ptr;
++ symndx_to_section[RELOC_SECTION_RCONST] =
++ bfd_get_section_by_name (input_bfd, ".rconst");
++
++ ecoff_data (input_bfd)->symndx_to_section = symndx_to_section;
++ }
++
++ sym_hashes = ecoff_data (input_bfd)->sym_hashes;
++
++ /* On the SW64, the .lita section must be addressable by the global
++ pointer. To support large programs, we need to allow multiple
++ global pointers. This works as long as each input .lita section
++ is <64KB big. This implies that when producing relocatable
++ output, the .lita section is limited to 64KB. . */
++
++ lita_sec = symndx_to_section[RELOC_SECTION_LITA];
++ gp = _bfd_get_gp_value (output_bfd);
++ if (! bfd_link_relocatable (info) && lita_sec != NULL)
++ {
++ struct ecoff_section_tdata *lita_sec_data;
++
++ /* Make sure we have a section data structure to which we can
++ hang on to the gp value we pick for the section. */
++ lita_sec_data = ecoff_section_data (input_bfd, lita_sec);
++ if (lita_sec_data == NULL)
++ {
++ amt = sizeof (struct ecoff_section_tdata);
++ lita_sec_data = ((struct ecoff_section_tdata *)
++ bfd_zalloc (input_bfd, amt));
++ lita_sec->used_by_bfd = lita_sec_data;
++ }
++
++ if (lita_sec_data->gp != 0)
++ {
++ /* If we already assigned a gp to this section, we better
++ stick with that value. */
++ gp = lita_sec_data->gp;
++ }
++ else
++ {
++ bfd_vma lita_vma;
++ bfd_size_type lita_size;
++
++ lita_vma = lita_sec->output_offset + lita_sec->output_section->vma;
++ lita_size = lita_sec->size;
++
++ if (gp == 0
++ || lita_vma < gp - 0x8000
++ || lita_vma + lita_size >= gp + 0x8000)
++ {
++ /* Either gp hasn't been set at all or the current gp
++ cannot address this .lita section. In both cases we
++ reset the gp to point into the "middle" of the
++ current input .lita section. */
++ if (gp && !ecoff_data (output_bfd)->issued_multiple_gp_warning)
++ {
++ (*info->callbacks->warning) (info,
++ _("using multiple gp values"),
++ (char *) NULL, output_bfd,
++ (asection *) NULL, (bfd_vma) 0);
++ ecoff_data (output_bfd)->issued_multiple_gp_warning = true;
++ }
++ if (lita_vma < gp - 0x8000)
++ gp = lita_vma + lita_size - 0x8000;
++ else
++ gp = lita_vma + 0x8000;
++
++ }
++
++ lita_sec_data->gp = gp;
++ }
++
++ _bfd_set_gp_value (output_bfd, gp);
++ }
++
++ gp_undefined = (gp == 0);
++
++ BFD_ASSERT (bfd_header_little_endian (output_bfd));
++ BFD_ASSERT (bfd_header_little_endian (input_bfd));
++
++ ext_rel = (struct external_reloc *) external_relocs;
++ ext_rel_end = ext_rel + input_section->reloc_count;
++ for (; ext_rel < ext_rel_end; ext_rel++)
++ {
++ bfd_vma r_vaddr;
++ unsigned long r_symndx;
++ int r_type;
++ int r_extern;
++ int r_offset;
++ int r_size;
++ bool relocatep;
++ bool adjust_addrp;
++ bool gp_usedp;
++ bfd_vma addend;
++
++ r_vaddr = H_GET_64 (input_bfd, ext_rel->r_vaddr);
++ r_symndx = H_GET_32 (input_bfd, ext_rel->r_symndx);
++
++ r_type = ((ext_rel->r_bits[0] & RELOC_BITS0_TYPE_LITTLE)
++ >> RELOC_BITS0_TYPE_SH_LITTLE);
++ r_extern = (ext_rel->r_bits[1] & RELOC_BITS1_EXTERN_LITTLE) != 0;
++ r_offset = ((ext_rel->r_bits[1] & RELOC_BITS1_OFFSET_LITTLE)
++ >> RELOC_BITS1_OFFSET_SH_LITTLE);
++ /* Ignored the reserved bits. */
++ r_size = ((ext_rel->r_bits[3] & RELOC_BITS3_SIZE_LITTLE)
++ >> RELOC_BITS3_SIZE_SH_LITTLE);
++
++ relocatep = false;
++ adjust_addrp = true;
++ gp_usedp = false;
++ addend = 0;
++
++ switch (r_type)
++ {
++ case SW64_R_GPRELHIGH:
++ _bfd_error_handler (_("%pB: %s unsupported"),
++ input_bfd, "SW64_R_GPRELHIGH");
++ bfd_set_error (bfd_error_bad_value);
++ continue;
++
++ case SW64_R_GPRELLOW:
++ _bfd_error_handler (_("%pB: %s unsupported"),
++ input_bfd, "SW64_R_GPRELLOW");
++ bfd_set_error (bfd_error_bad_value);
++ continue;
++
++ default:
++ /* xgettext:c-format */
++ _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
++ input_bfd, (int) r_type);
++ bfd_set_error (bfd_error_bad_value);
++ continue;
++
++ case SW64_R_IGNORE:
++ /* This reloc appears after a GPDISP reloc. On earlier
++ versions of OSF/1, It marked the position of the second
++ instruction to be altered by the GPDISP reloc, but it is
++ not otherwise used for anything. For some reason, the
++ address of the relocation does not appear to include the
++ section VMA, unlike the other relocation types. */
++ if (bfd_link_relocatable (info))
++ H_PUT_64 (input_bfd, input_section->output_offset + r_vaddr,
++ ext_rel->r_vaddr);
++ adjust_addrp = false;
++ break;
++
++ case SW64_R_REFLONG:
++ case SW64_R_REFQUAD:
++ case SW64_R_HINT:
++ relocatep = true;
++ break;
++
++ case SW64_R_BRADDR:
++ case SW64_R_SREL16:
++ case SW64_R_SREL32:
++ case SW64_R_SREL64:
++ if (r_extern)
++ addend += - (r_vaddr + 4);
++ relocatep = true;
++ break;
++
++ case SW64_R_GPREL32:
++ /* This relocation is used in a switch table. It is a 32
++ bit offset from the current GP value. We must adjust it
++ by the different between the original GP value and the
++ current GP value. */
++ relocatep = true;
++ addend = ecoff_data (input_bfd)->gp - gp;
++ gp_usedp = true;
++ break;
++
++ case SW64_R_LITERAL:
++ /* This is a reference to a literal value, generally
++ (always?) in the .lita section. This is a 16 bit GP
++ relative relocation. Sometimes the subsequent reloc is a
++ LITUSE reloc, which indicates how this reloc is used.
++ This sometimes permits rewriting the two instructions
++ referred to by the LITERAL and the LITUSE into different
++ instructions which do not refer to .lita. This can save
++ a memory reference, and permits removing a value from
++ .lita thus saving GP relative space.
++
++ We do not these optimizations. To do them we would need
++ to arrange to link the .lita section first, so that by
++ the time we got here we would know the final values to
++ use. This would not be particularly difficult, but it is
++ not currently implemented. */
++
++ /* I believe that the LITERAL reloc will only apply to a ldq
++ or ldl instruction, so check my assumption. */
++ {
++ unsigned long insn;
++
++ insn = bfd_get_32 (input_bfd,
++ contents + r_vaddr - input_section->vma);
++ BFD_ASSERT (((insn >> 26) & 0x3f) == 0x29
++ || ((insn >> 26) & 0x3f) == 0x28);
++ }
++
++ relocatep = true;
++ addend = ecoff_data (input_bfd)->gp - gp;
++ gp_usedp = true;
++ break;
++
++ case SW64_R_LITUSE:
++ /* See SW64_R_LITERAL above for the uses of this reloc. It
++ does not cause anything to happen, itself. */
++ break;
++
++ case SW64_R_GPDISP:
++ /* This marks the ldah of an ldah/lda pair which loads the
++ gp register with the difference of the gp value and the
++ current location. The second of the pair is r_symndx
++ bytes ahead. It used to be marked with an SW64_R_IGNORE
++ reloc, but OSF/1 3.2 no longer does that. */
++ {
++ unsigned long insn1, insn2;
++
++ /* Get the two instructions. */
++ insn1 = bfd_get_32 (input_bfd,
++ contents + r_vaddr - input_section->vma);
++ insn2 = bfd_get_32 (input_bfd,
++ (contents
++ + r_vaddr
++ - input_section->vma
++ + r_symndx));
++
++ BFD_ASSERT (((insn1 >> 26) & 0x3f) == 0x09); /* ldah */
++ BFD_ASSERT (((insn2 >> 26) & 0x3f) == 0x08); /* lda */
++
++ /* Get the existing addend. We must account for the sign
++ extension done by lda and ldah. */
++ addend = ((insn1 & 0xffff) << 16) + (insn2 & 0xffff);
++ if (insn1 & 0x8000)
++ {
++ /* This is addend -= 0x100000000 without causing an
++ integer overflow on a 32 bit host. */
++ addend -= 0x80000000;
++ addend -= 0x80000000;
++ }
++ if (insn2 & 0x8000)
++ addend -= 0x10000;
++
++ /* The existing addend includes the difference between the
++ gp of the input BFD and the address in the input BFD.
++ We want to change this to the difference between the
++ final GP and the final address. */
++ addend += (gp
++ - ecoff_data (input_bfd)->gp
++ + input_section->vma
++ - (input_section->output_section->vma
++ + input_section->output_offset));
++
++ /* Change the instructions, accounting for the sign
++ extension, and write them out. */
++ if (addend & 0x8000)
++ addend += 0x10000;
++ insn1 = (insn1 & 0xffff0000) | ((addend >> 16) & 0xffff);
++ insn2 = (insn2 & 0xffff0000) | (addend & 0xffff);
++
++ bfd_put_32 (input_bfd, (bfd_vma) insn1,
++ contents + r_vaddr - input_section->vma);
++ bfd_put_32 (input_bfd, (bfd_vma) insn2,
++ contents + r_vaddr - input_section->vma + r_symndx);
++
++ gp_usedp = true;
++ }
++ break;
++
++ case SW64_R_OP_PUSH:
++ case SW64_R_OP_PSUB:
++ case SW64_R_OP_PRSHIFT:
++ /* Manipulate values on the reloc evaluation stack. The
++ r_vaddr field is not an address in input_section, it is
++ the current value (including any addend) of the object
++ being used. */
++ if (! r_extern)
++ {
++ asection *s;
++
++ s = symndx_to_section[r_symndx];
++ if (s == (asection *) NULL)
++ abort ();
++ addend = s->output_section->vma + s->output_offset - s->vma;
++ }
++ else
++ {
++ struct ecoff_link_hash_entry *h;
++
++ h = sym_hashes[r_symndx];
++ if (h == (struct ecoff_link_hash_entry *) NULL)
++ abort ();
++
++ if (! bfd_link_relocatable (info))
++ {
++ if (h->root.type == bfd_link_hash_defined
++ || h->root.type == bfd_link_hash_defweak)
++ addend = (h->root.u.def.value
++ + h->root.u.def.section->output_section->vma
++ + h->root.u.def.section->output_offset);
++ else
++ {
++ /* Note that we pass the address as 0, since we
++ do not have a meaningful number for the
++ location within the section that is being
++ relocated. */
++ (*info->callbacks->undefined_symbol)
++ (info, h->root.root.string, input_bfd,
++ input_section, (bfd_vma) 0, true);
++ addend = 0;
++ }
++ }
++ else
++ {
++ if (h->root.type != bfd_link_hash_defined
++ && h->root.type != bfd_link_hash_defweak
++ && h->indx == -1)
++ {
++ /* This symbol is not being written out. Pass
++ the address as 0, as with undefined_symbol,
++ above. */
++ (*info->callbacks->unattached_reloc)
++ (info, h->root.root.string,
++ input_bfd, input_section, (bfd_vma) 0);
++ }
++
++ addend = sw64_convert_external_reloc (output_bfd, info,
++ input_bfd,
++ ext_rel, h);
++ }
++ }
++
++ addend += r_vaddr;
++
++ if (bfd_link_relocatable (info))
++ {
++ /* Adjust r_vaddr by the addend. */
++ H_PUT_64 (input_bfd, addend, ext_rel->r_vaddr);
++ }
++ else
++ {
++ switch (r_type)
++ {
++ case SW64_R_OP_PUSH:
++ if (tos >= RELOC_STACKSIZE)
++ abort ();
++ stack[tos++] = addend;
++ break;
++
++ case SW64_R_OP_PSUB:
++ if (tos == 0)
++ abort ();
++ stack[tos - 1] -= addend;
++ break;
++
++ case SW64_R_OP_PRSHIFT:
++ if (tos == 0)
++ abort ();
++ stack[tos - 1] >>= addend;
++ break;
++ }
++ }
++
++ adjust_addrp = false;
++ break;
++
++ case SW64_R_OP_STORE:
++ /* Store a value from the reloc stack into a bitfield. If
++ we are generating relocatable output, all we do is
++ adjust the address of the reloc. */
++ if (! bfd_link_relocatable (info))
++ {
++ bfd_vma mask;
++ bfd_vma val;
++
++ if (tos == 0)
++ abort ();
++
++ /* Get the relocation mask. The separate steps and the
++ casts to bfd_vma are attempts to avoid a bug in the
++ SW64 OSF 1.3 C compiler. See reloc.c for more
++ details. */
++ mask = 1;
++ mask <<= (bfd_vma) r_size;
++ mask -= 1;
++
++ /* FIXME: I don't know what kind of overflow checking,
++ if any, should be done here. */
++ val = bfd_get_64 (input_bfd,
++ contents + r_vaddr - input_section->vma);
++ val &=~ mask << (bfd_vma) r_offset;
++ val |= (stack[--tos] & mask) << (bfd_vma) r_offset;
++ bfd_put_64 (input_bfd, val,
++ contents + r_vaddr - input_section->vma);
++ }
++ break;
++
++ case SW64_R_GPVALUE:
++ /* I really don't know if this does the right thing. */
++ gp = ecoff_data (input_bfd)->gp + r_symndx;
++ gp_undefined = false;
++ break;
++ }
++
++ if (relocatep)
++ {
++ reloc_howto_type *howto;
++ struct ecoff_link_hash_entry *h = NULL;
++ asection *s = NULL;
++ bfd_vma relocation;
++ bfd_reloc_status_type r;
++
++ /* Perform a relocation. */
++
++ howto = &sw64_howto_table[r_type];
++
++ if (r_extern)
++ {
++ h = sym_hashes[r_symndx];
++ /* If h is NULL, that means that there is a reloc
++ against an external symbol which we thought was just
++ a debugging symbol. This should not happen. */
++ if (h == (struct ecoff_link_hash_entry *) NULL)
++ abort ();
++ }
++ else
++ {
++ if (r_symndx >= NUM_RELOC_SECTIONS)
++ s = NULL;
++ else
++ s = symndx_to_section[r_symndx];
++
++ if (s == (asection *) NULL)
++ abort ();
++ }
++
++ if (bfd_link_relocatable (info))
++ {
++ /* We are generating relocatable output, and must
++ convert the existing reloc. */
++ if (r_extern)
++ {
++ if (h->root.type != bfd_link_hash_defined
++ && h->root.type != bfd_link_hash_defweak
++ && h->indx == -1)
++ {
++ /* This symbol is not being written out. */
++ (*info->callbacks->unattached_reloc)
++ (info, h->root.root.string, input_bfd,
++ input_section, r_vaddr - input_section->vma);
++ }
++
++ relocation = sw64_convert_external_reloc (output_bfd,
++ info,
++ input_bfd,
++ ext_rel,
++ h);
++ }
++ else
++ {
++ /* This is a relocation against a section. Adjust
++ the value by the amount the section moved. */
++ relocation = (s->output_section->vma
++ + s->output_offset
++ - s->vma);
++ }
++
++ /* If this is PC relative, the existing object file
++ appears to already have the reloc worked out. We
++ must subtract out the old value and add in the new
++ one. */
++ if (howto->pc_relative)
++ relocation -= (input_section->output_section->vma
++ + input_section->output_offset
++ - input_section->vma);
++
++ /* Put in any addend. */
++ relocation += addend;
++
++ /* Adjust the contents. */
++ r = _bfd_relocate_contents (howto, input_bfd, relocation,
++ (contents
++ + r_vaddr
++ - input_section->vma));
++ }
++ else
++ {
++ /* We are producing a final executable. */
++ if (r_extern)
++ {
++ /* This is a reloc against a symbol. */
++ if (h->root.type == bfd_link_hash_defined
++ || h->root.type == bfd_link_hash_defweak)
++ {
++ asection *hsec;
++
++ hsec = h->root.u.def.section;
++ relocation = (h->root.u.def.value
++ + hsec->output_section->vma
++ + hsec->output_offset);
++ }
++ else
++ {
++ (*info->callbacks->undefined_symbol)
++ (info, h->root.root.string, input_bfd, input_section,
++ r_vaddr - input_section->vma, true);
++ relocation = 0;
++ }
++ }
++ else
++ {
++ /* This is a reloc against a section. */
++ relocation = (s->output_section->vma
++ + s->output_offset
++ - s->vma);
++
++ /* Adjust a PC relative relocation by removing the
++ reference to the original source section. */
++ if (howto->pc_relative)
++ relocation += input_section->vma;
++ }
++
++ r = _bfd_final_link_relocate (howto,
++ input_bfd,
++ input_section,
++ contents,
++ r_vaddr - input_section->vma,
++ relocation,
++ addend);
++ }
++
++ if (r != bfd_reloc_ok)
++ {
++ switch (r)
++ {
++ default:
++ case bfd_reloc_outofrange:
++ abort ();
++ case bfd_reloc_overflow:
++ {
++ const char *name;
++
++ if (r_extern)
++ name = sym_hashes[r_symndx]->root.root.string;
++ else
++ name = bfd_section_name (symndx_to_section[r_symndx]);
++ (*info->callbacks->reloc_overflow)
++ (info, NULL, name, sw64_howto_table[r_type].name,
++ (bfd_vma) 0, input_bfd, input_section,
++ r_vaddr - input_section->vma);
++ }
++ break;
++ }
++ }
++ }
++
++ if (bfd_link_relocatable (info) && adjust_addrp)
++ {
++ /* Change the address of the relocation. */
++ H_PUT_64 (input_bfd,
++ (input_section->output_section->vma
++ + input_section->output_offset
++ - input_section->vma
++ + r_vaddr),
++ ext_rel->r_vaddr);
++ }
++
++ if (gp_usedp && gp_undefined)
++ {
++ (*info->callbacks->reloc_dangerous)
++ (info, _("GP relative relocation used when GP not defined"),
++ input_bfd, input_section, r_vaddr - input_section->vma);
++ /* Only give the error once per link. */
++ gp = 4;
++ _bfd_set_gp_value (output_bfd, gp);
++ gp_undefined = false;
++ }
++ }
++
++ if (tos != 0)
++ abort ();
++
++ return true;
++}
++
++/* Do final adjustments to the filehdr and the aouthdr. This routine
++ sets the dynamic bits in the file header. */
++
++static bool
++sw64_adjust_headers (bfd *abfd,
++ struct internal_filehdr *fhdr,
++ struct internal_aouthdr *ahdr ATTRIBUTE_UNUSED)
++{
++ if ((abfd->flags & (DYNAMIC | EXEC_P)) == (DYNAMIC | EXEC_P))
++ fhdr->f_flags |= F_SW64_CALL_SHARED;
++ else if ((abfd->flags & DYNAMIC) != 0)
++ fhdr->f_flags |= F_SW64_SHARABLE;
++ return true;
++}
++
++/* Archive handling. In OSF/1 (or Digital Unix) v3.2, Digital
++ introduced archive packing, in which the elements in an archive are
++ optionally compressed using a simple dictionary scheme. We know
++ how to read such archives, but we don't write them. */
++
++#define sw64_ecoff_slurp_armap _bfd_ecoff_slurp_armap
++#define sw64_ecoff_slurp_extended_name_table \
++ _bfd_ecoff_slurp_extended_name_table
++#define sw64_ecoff_construct_extended_name_table \
++ _bfd_ecoff_construct_extended_name_table
++#define sw64_ecoff_truncate_arname _bfd_ecoff_truncate_arname
++#define sw64_ecoff_write_armap _bfd_ecoff_write_armap
++#define sw64_ecoff_write_ar_hdr _bfd_generic_write_ar_hdr
++#define sw64_ecoff_generic_stat_arch_elt _bfd_ecoff_generic_stat_arch_elt
++#define sw64_ecoff_update_armap_timestamp _bfd_ecoff_update_armap_timestamp
++
++/* A compressed file uses this instead of ARFMAG. */
++
++#define ARFZMAG "Z\012"
++
++/* Read an archive header. This is like the standard routine, but it
++ also accepts ARFZMAG. */
++
++static void *
++sw64_ecoff_read_ar_hdr (bfd *abfd)
++{
++ struct areltdata *ret;
++ struct ar_hdr *h;
++
++ ret = (struct areltdata *) _bfd_generic_read_ar_hdr_mag (abfd, ARFZMAG);
++ if (ret == NULL)
++ return NULL;
++
++ h = (struct ar_hdr *) ret->arch_header;
++ if (strncmp (h->ar_fmag, ARFZMAG, 2) == 0)
++ {
++ bfd_byte ab[8];
++
++ /* This is a compressed file. We must set the size correctly.
++ The size is the eight bytes after the dummy file header. */
++ if (bfd_seek (abfd, FILHSZ, SEEK_CUR) != 0
++ || bfd_read (ab, 8, abfd) != 8
++ || bfd_seek (abfd, -(FILHSZ + 8), SEEK_CUR) != 0)
++ {
++ free (ret);
++ return NULL;
++ }
++
++ ret->parsed_size = H_GET_64 (abfd, ab);
++ }
++
++ return ret;
++}
++
++/* Get an archive element at a specified file position. This is where
++ we uncompress the archive element if necessary. */
++
++static bfd *
++sw64_ecoff_get_elt_at_filepos (bfd *archive, file_ptr filepos,
++ struct bfd_link_info *info)
++{
++ bfd *nbfd = NULL;
++ struct areltdata *tdata;
++ struct ar_hdr *hdr;
++ bfd_byte ab[8];
++ bfd_size_type size;
++ bfd_byte *buf, *p;
++ struct bfd_in_memory *bim;
++ ufile_ptr filesize;
++
++ buf = NULL;
++ nbfd = _bfd_get_elt_at_filepos (archive, filepos, info);
++ if (nbfd == NULL)
++ goto error_return;
++
++ if ((nbfd->flags & BFD_IN_MEMORY) != 0)
++ {
++ /* We have already expanded this BFD. */
++ return nbfd;
++ }
++
++ tdata = (struct areltdata *) nbfd->arelt_data;
++ hdr = (struct ar_hdr *) tdata->arch_header;
++ if (strncmp (hdr->ar_fmag, ARFZMAG, 2) != 0)
++ return nbfd;
++
++ /* We must uncompress this element. We do this by copying it into a
++ memory buffer, and making bfd_read and bfd_seek use that buffer.
++ This can use a lot of memory, but it's simpler than getting a
++ temporary file, making that work with the file descriptor caching
++ code, and making sure that it is deleted at all appropriate
++ times. It can be changed if it ever becomes important. */
++
++ /* The compressed file starts with a dummy ECOFF file header. */
++ if (bfd_seek (nbfd, FILHSZ, SEEK_SET) != 0)
++ goto error_return;
++
++ /* The next eight bytes are the real file size. */
++ if (bfd_read (ab, 8, nbfd) != 8)
++ goto error_return;
++ size = H_GET_64 (nbfd, ab);
++
++ /* The decompression algorithm will at most expand by eight times. */
++ filesize = bfd_get_file_size (archive);
++ if (filesize != 0 && size / 8 > filesize)
++ {
++ bfd_set_error (bfd_error_malformed_archive);
++ goto error_return;
++ }
++
++ if (size != 0)
++ {
++ bfd_size_type left;
++ bfd_byte dict[4096];
++ unsigned int h;
++ bfd_byte b;
++
++ buf = (bfd_byte *) bfd_malloc (size);
++ if (buf == NULL)
++ goto error_return;
++ p = buf;
++
++ left = size;
++
++ /* I don't know what the next eight bytes are for. */
++ if (bfd_read (ab, 8, nbfd) != 8)
++ goto error_return;
++
++ /* This is the uncompression algorithm. It's a simple
++ dictionary based scheme in which each character is predicted
++ by a hash of the previous three characters. A control byte
++ indicates whether the character is predicted or whether it
++ appears in the input stream; each control byte manages the
++ next eight bytes in the output stream. */
++ memset (dict, 0, sizeof dict);
++ h = 0;
++ while (bfd_read (&b, 1, nbfd) == 1)
++ {
++ unsigned int i;
++
++ for (i = 0; i < 8; i++, b >>= 1)
++ {
++ bfd_byte n;
++
++ if ((b & 1) == 0)
++ n = dict[h];
++ else
++ {
++ if (bfd_read (&n, 1, nbfd) != 1)
++ goto error_return;
++ dict[h] = n;
++ }
++
++ *p++ = n;
++
++ --left;
++ if (left == 0)
++ break;
++
++ h <<= 4;
++ h ^= n;
++ h &= sizeof dict - 1;
++ }
++
++ if (left == 0)
++ break;
++ }
++ }
++
++ /* Now the uncompressed file contents are in buf. */
++ bim = ((struct bfd_in_memory *)
++ bfd_malloc ((bfd_size_type) sizeof (struct bfd_in_memory)));
++ if (bim == NULL)
++ goto error_return;
++ bim->size = size;
++ bim->buffer = buf;
++
++ nbfd->mtime_set = true;
++ nbfd->mtime = strtol (hdr->ar_date, (char **) NULL, 10);
++
++ nbfd->flags |= BFD_IN_MEMORY;
++ nbfd->iostream = bim;
++ nbfd->iovec = &_bfd_memory_iovec;
++ nbfd->origin = 0;
++ nbfd->size = 0;
++ BFD_ASSERT (! nbfd->cacheable);
++
++ return nbfd;
++
++ error_return:
++ free (buf);
++ if (nbfd != NULL)
++ bfd_close (nbfd);
++ return NULL;
++}
++
++/* Open the next archived file. */
++
++static bfd *
++sw64_ecoff_openr_next_archived_file (bfd *archive, bfd *last_file)
++{
++ ufile_ptr filestart;
++
++ if (last_file == NULL)
++ filestart = bfd_ardata (archive)->first_file_filepos;
++ else
++ {
++ struct areltdata *t;
++ struct ar_hdr *h;
++ bfd_size_type size;
++
++ /* We can't use arelt_size here, because that uses parsed_size,
++ which is the uncompressed size. We need the compressed size. */
++ t = (struct areltdata *) last_file->arelt_data;
++ h = (struct ar_hdr *) t->arch_header;
++ size = strtol (h->ar_size, (char **) NULL, 10);
++
++ /* Pad to an even boundary...
++ Note that last_file->origin can be odd in the case of
++ BSD-4.4-style element with a long odd size. */
++ filestart = last_file->proxy_origin + size;
++ filestart += filestart % 2;
++ if (filestart < last_file->proxy_origin)
++ {
++ /* Prevent looping. See PR19256. */
++ bfd_set_error (bfd_error_malformed_archive);
++ return NULL;
++ }
++ }
++
++ return sw64_ecoff_get_elt_at_filepos (archive, filestart, NULL);
++}
++
++/* Open the archive file given an index into the armap. */
++
++static bfd *
++sw64_ecoff_get_elt_at_index (bfd *abfd, symindex sym_index)
++{
++ carsym *entry;
++
++ entry = bfd_ardata (abfd)->symdefs + sym_index;
++ return sw64_ecoff_get_elt_at_filepos (abfd, entry->file_offset,
++ NULL);
++}
++
++static void
++sw64_ecoff_swap_coff_aux_in (bfd *abfd ATTRIBUTE_UNUSED,
++ void *ext1 ATTRIBUTE_UNUSED,
++ int type ATTRIBUTE_UNUSED,
++ int in_class ATTRIBUTE_UNUSED,
++ int indx ATTRIBUTE_UNUSED,
++ int numaux ATTRIBUTE_UNUSED,
++ void *in1 ATTRIBUTE_UNUSED)
++{
++}
++
++static void
++sw64_ecoff_swap_coff_sym_in (bfd *abfd ATTRIBUTE_UNUSED,
++ void *ext1 ATTRIBUTE_UNUSED,
++ void *in1 ATTRIBUTE_UNUSED)
++{
++}
++
++static void
++sw64_ecoff_swap_coff_lineno_in (bfd *abfd ATTRIBUTE_UNUSED,
++ void *ext1 ATTRIBUTE_UNUSED,
++ void *in1 ATTRIBUTE_UNUSED)
++{
++}
++
++static unsigned int
++sw64_ecoff_swap_coff_aux_out (bfd *abfd ATTRIBUTE_UNUSED,
++ void *inp ATTRIBUTE_UNUSED,
++ int type ATTRIBUTE_UNUSED,
++ int in_class ATTRIBUTE_UNUSED,
++ int indx ATTRIBUTE_UNUSED,
++ int numaux ATTRIBUTE_UNUSED,
++ void *extp ATTRIBUTE_UNUSED)
++{
++ return 0;
++}
++
++static unsigned int
++sw64_ecoff_swap_coff_sym_out (bfd *abfd ATTRIBUTE_UNUSED,
++ void *inp ATTRIBUTE_UNUSED,
++ void *extp ATTRIBUTE_UNUSED)
++{
++ return 0;
++}
++
++static unsigned int
++sw64_ecoff_swap_coff_lineno_out (bfd *abfd ATTRIBUTE_UNUSED,
++ void *inp ATTRIBUTE_UNUSED,
++ void *extp ATTRIBUTE_UNUSED)
++{
++ return 0;
++}
++
++static unsigned int
++sw64_ecoff_swap_coff_reloc_out (bfd *abfd ATTRIBUTE_UNUSED,
++ void *inp ATTRIBUTE_UNUSED,
++ void *extp ATTRIBUTE_UNUSED)
++{
++ return 0;
++}
++
++/* This is the ECOFF backend structure. The backend field of the
++ target vector points to this. */
++
++static const struct ecoff_backend_data sw64_ecoff_backend_data =
++{
++ /* COFF backend structure. */
++ {
++ sw64_ecoff_swap_coff_aux_in, sw64_ecoff_swap_coff_sym_in,
++ sw64_ecoff_swap_coff_lineno_in, sw64_ecoff_swap_coff_aux_out,
++ sw64_ecoff_swap_coff_sym_out, sw64_ecoff_swap_coff_lineno_out,
++ sw64_ecoff_swap_coff_reloc_out,
++ sw64_ecoff_swap_filehdr_out, sw64_ecoff_swap_aouthdr_out,
++ sw64_ecoff_swap_scnhdr_out,
++ FILHSZ, AOUTSZ, SCNHSZ, 0, 0, 0, 0, FILNMLEN, true,
++ ECOFF_NO_LONG_SECTION_NAMES, 4, false, 2, 32768,
++ sw64_ecoff_swap_filehdr_in, sw64_ecoff_swap_aouthdr_in,
++ sw64_ecoff_swap_scnhdr_in, NULL,
++ sw64_ecoff_bad_format_hook, _bfd_ecoff_set_arch_mach_hook,
++ sw64_ecoff_mkobject_hook, _bfd_ecoff_styp_to_sec_flags,
++ _bfd_ecoff_set_alignment_hook, _bfd_ecoff_slurp_symbol_table,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL
++ },
++ /* Supported architecture. */
++ bfd_arch_sw64,
++ /* Initial portion of armap string. */
++ "________64",
++ /* The page boundary used to align sections in a demand-paged
++ executable file. E.g., 0x1000. */
++ 0x2000,
++ /* TRUE if the .rdata section is part of the text segment, as on the
++ SW64. FALSE if .rdata is part of the data segment, as on the
++ MIPS. */
++ true,
++ /* Bitsize of constructor entries. */
++ 64,
++ /* Reloc to use for constructor entries. */
++ &sw64_howto_table[SW64_R_REFQUAD],
++ {
++ /* Symbol table magic number. */
++ magicSym2,
++ /* Alignment of debugging information. E.g., 4. */
++ 8,
++ /* Sizes of external symbolic information. */
++ sizeof (struct hdr_ext),
++ sizeof (struct dnr_ext),
++ sizeof (struct pdr_ext),
++ sizeof (struct sym_ext),
++ sizeof (struct opt_ext),
++ sizeof (struct fdr_ext),
++ sizeof (struct rfd_ext),
++ sizeof (struct ext_ext),
++ /* Functions to swap in external symbolic data. */
++ ecoff_swap_hdr_in,
++ ecoff_swap_dnr_in,
++ ecoff_swap_pdr_in,
++ ecoff_swap_sym_in,
++ ecoff_swap_opt_in,
++ ecoff_swap_fdr_in,
++ ecoff_swap_rfd_in,
++ ecoff_swap_ext_in,
++ _bfd_ecoff_swap_tir_in,
++ _bfd_ecoff_swap_rndx_in,
++ /* Functions to swap out external symbolic data. */
++ ecoff_swap_hdr_out,
++ ecoff_swap_dnr_out,
++ ecoff_swap_pdr_out,
++ ecoff_swap_sym_out,
++ ecoff_swap_opt_out,
++ ecoff_swap_fdr_out,
++ ecoff_swap_rfd_out,
++ ecoff_swap_ext_out,
++ _bfd_ecoff_swap_tir_out,
++ _bfd_ecoff_swap_rndx_out,
++ /* Function to read in symbolic data. */
++ _bfd_ecoff_slurp_symbolic_info
++ },
++ /* External reloc size. */
++ RELSZ,
++ /* Reloc swapping functions. */
++ sw64_ecoff_swap_reloc_in,
++ sw64_ecoff_swap_reloc_out,
++ /* Backend reloc tweaking. */
++ sw64_adjust_reloc_in,
++ sw64_adjust_reloc_out,
++ /* Relocate section contents while linking. */
++ sw64_relocate_section,
++ /* Do final adjustments to filehdr and aouthdr. */
++ sw64_adjust_headers,
++ /* Read an element from an archive at a given file position. */
++ sw64_ecoff_get_elt_at_filepos
++};
++
++/* Looking up a reloc type is SW64 specific. */
++#define _bfd_ecoff_bfd_reloc_type_lookup sw64_bfd_reloc_type_lookup
++#define _bfd_ecoff_bfd_reloc_name_lookup \
++ sw64_bfd_reloc_name_lookup
++
++/* So is getting relocated section contents. */
++#define _bfd_ecoff_bfd_get_relocated_section_contents \
++ sw64_ecoff_get_relocated_section_contents
++
++/* Handling file windows is generic. */
++#define _bfd_ecoff_get_section_contents_in_window \
++ _bfd_generic_get_section_contents_in_window
++
++/* Input section flag lookup is generic. */
++#define _bfd_ecoff_bfd_lookup_section_flags bfd_generic_lookup_section_flags
++
++/* Relaxing sections is generic. */
++#define _bfd_ecoff_bfd_relax_section bfd_generic_relax_section
++#define _bfd_ecoff_bfd_gc_sections bfd_generic_gc_sections
++#define _bfd_ecoff_bfd_merge_sections bfd_generic_merge_sections
++#define _bfd_ecoff_bfd_is_group_section bfd_generic_is_group_section
++#define _bfd_ecoff_bfd_group_name bfd_generic_group_name
++#define _bfd_ecoff_bfd_discard_group bfd_generic_discard_group
++#define _bfd_ecoff_section_already_linked \
++ _bfd_coff_section_already_linked
++#define _bfd_ecoff_bfd_define_common_symbol bfd_generic_define_common_symbol
++#define _bfd_ecoff_bfd_link_hide_symbol _bfd_generic_link_hide_symbol
++#define _bfd_ecoff_bfd_define_start_stop bfd_generic_define_start_stop
++#define _bfd_ecoff_bfd_link_check_relocs _bfd_generic_link_check_relocs
++
++/* Installing internal relocations in a section is also generic. */
++#define _bfd_ecoff_set_reloc _bfd_generic_set_reloc
++
++const bfd_target sw64_ecoff_le_vec =
++{
++ "ecoff-littlesw64", /* name */
++ bfd_target_ecoff_flavour,
++ BFD_ENDIAN_LITTLE, /* data byte order is little */
++ BFD_ENDIAN_LITTLE, /* header byte order is little */
++
++ (HAS_RELOC | EXEC_P /* object flags */
++ | HAS_LINENO | HAS_DEBUG
++ | HAS_SYMS | HAS_LOCALS | DYNAMIC | WP_TEXT | D_PAGED),
++
++ (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC | SEC_CODE
++ | SEC_DATA | SEC_SMALL_DATA),
++ 0, /* leading underscore */
++ ' ', /* ar_pad_char */
++ 15, /* ar_max_namelen */
++ 0, /* match priority. */
++ TARGET_KEEP_UNUSED_SECTION_SYMBOLS, /* keep unused section symbols. */
++ bfd_getl64, bfd_getl_signed_64, bfd_putl64,
++ bfd_getl32, bfd_getl_signed_32, bfd_putl32,
++ bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
++ bfd_getl64, bfd_getl_signed_64, bfd_putl64,
++ bfd_getl32, bfd_getl_signed_32, bfd_putl32,
++ bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
++
++ { /* bfd_check_format */
++ _bfd_dummy_target,
++ sw64_ecoff_object_p,
++ bfd_generic_archive_p,
++ _bfd_dummy_target
++ },
++ { /* bfd_set_format */
++ _bfd_bool_bfd_false_error,
++ _bfd_ecoff_mkobject,
++ _bfd_generic_mkarchive,
++ _bfd_bool_bfd_false_error
++ },
++ { /* bfd_write_contents */
++ _bfd_bool_bfd_false_error,
++ _bfd_ecoff_write_object_contents,
++ _bfd_write_archive_contents,
++ _bfd_bool_bfd_false_error
++ },
++
++ BFD_JUMP_TABLE_GENERIC (_bfd_ecoff),
++ BFD_JUMP_TABLE_COPY (_bfd_ecoff),
++ BFD_JUMP_TABLE_CORE (_bfd_nocore),
++ BFD_JUMP_TABLE_ARCHIVE (sw64_ecoff),
++ BFD_JUMP_TABLE_SYMBOLS (_bfd_ecoff),
++ BFD_JUMP_TABLE_RELOCS (_bfd_ecoff),
++ BFD_JUMP_TABLE_WRITE (_bfd_ecoff),
++ BFD_JUMP_TABLE_LINK (_bfd_ecoff),
++ BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
++
++ NULL,
++
++ &sw64_ecoff_backend_data
++};
+diff -Naur gdb-14.1-after-patch/bfd/config.bfd gdb-14.1-sw64/bfd/config.bfd
+--- gdb-14.1-after-patch/bfd/config.bfd 2023-12-03 13:23:54.000000000 +0800
++++ gdb-14.1-sw64/bfd/config.bfd 2025-03-03 10:59:12.960000000 +0800
+@@ -217,6 +217,7 @@
+ sh*) targ_archs=bfd_sh_arch ;;
+ sparc*) targ_archs=bfd_sparc_arch ;;
+ spu*) targ_archs=bfd_spu_arch ;;
++sw64*) targ_archs=bfd_sw64_arch ;;
+ tilegx*) targ_archs=bfd_tilegx_arch ;;
+ tilepro*) targ_archs=bfd_tilepro_arch ;;
+ v850*) targ_archs="bfd_v850_arch bfd_v850_rh850_arch" ;;
+@@ -328,6 +329,11 @@
+ targ_defvec=alpha_ecoff_le_vec
+ want64=true
+ ;;
++ sw64*-*-linux-* | sw64*-*-elf*)
++ targ_defvec=sw64_elf64_vec
++ targ_selvecs=sw64_ecoff_le_vec
++ want64=true
++ ;;
+ amdgcn-*-*)
+ targ_defvec=amdgcn_elf64_le_vec
+ want64=true
+diff -Naur gdb-14.1-after-patch/bfd/configure gdb-14.1-sw64/bfd/configure
+--- gdb-14.1-after-patch/bfd/configure 2023-12-03 13:23:54.000000000 +0800
++++ gdb-14.1-sw64/bfd/configure 2025-03-03 10:59:12.960000000 +0800
+@@ -13873,6 +13873,8 @@
+ aarch64_mach_o_vec) tb="$tb mach-o-aarch64.lo"; target_size=64 ;;
+ aarch64_pei_le_vec) tb="$tb pei-aarch64.lo pe-aarch64igen.lo $coff"; target_size=64 ;;
+ aarch64_pe_le_vec) tb="$tb pe-aarch64.lo pe-aarch64igen.lo $coff"; target_size=64 ;;
++ sw64_ecoff_le_vec) tb="$tb coff-sw64.lo ecoff.lo $ecoff"; target_size=64 ;;
++ sw64_elf64_vec) tb="$tb elf64-sw64.lo elf64.lo $elf"; target_size=64 ;;
+ alpha_ecoff_le_vec) tb="$tb coff-alpha.lo ecoff.lo $ecoff"; target_size=64 ;;
+ alpha_elf64_vec) tb="$tb elf64-alpha.lo elf64.lo $elf"; target_size=64 ;;
+ alpha_elf64_fbsd_vec) tb="$tb elf64-alpha.lo elf64.lo $elf"; target_size=64 ;;
+diff -Naur gdb-14.1-after-patch/bfd/configure.ac gdb-14.1-sw64/bfd/configure.ac
+--- gdb-14.1-after-patch/bfd/configure.ac 2023-12-03 13:23:54.000000000 +0800
++++ gdb-14.1-sw64/bfd/configure.ac 2025-03-03 10:59:12.960000000 +0800
+@@ -409,6 +409,8 @@
+ aarch64_mach_o_vec) tb="$tb mach-o-aarch64.lo"; target_size=64 ;;
+ aarch64_pei_le_vec) tb="$tb pei-aarch64.lo pe-aarch64igen.lo $coff"; target_size=64 ;;
+ aarch64_pe_le_vec) tb="$tb pe-aarch64.lo pe-aarch64igen.lo $coff"; target_size=64 ;;
++ sw64_ecoff_le_vec) tb="$tb coff-sw64.lo ecoff.lo $ecoff"; target_size=64 ;;
++ sw64_elf64_vec) tb="$tb elf64-sw64.lo elf64.lo $elf"; target_size=64 ;;
+ alpha_ecoff_le_vec) tb="$tb coff-alpha.lo ecoff.lo $ecoff"; target_size=64 ;;
+ alpha_elf64_vec) tb="$tb elf64-alpha.lo elf64.lo $elf"; target_size=64 ;;
+ alpha_elf64_fbsd_vec) tb="$tb elf64-alpha.lo elf64.lo $elf"; target_size=64 ;;
+diff -Naur gdb-14.1-after-patch/bfd/cpu-sw64.c gdb-14.1-sw64/bfd/cpu-sw64.c
+--- gdb-14.1-after-patch/bfd/cpu-sw64.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/bfd/cpu-sw64.c 2025-03-03 10:59:12.960000000 +0800
+@@ -0,0 +1,54 @@
++/* BFD support for the SW64 architecture.
++ Copyright (C) 1992-2022 Free Software Foundation, Inc.
++
++ This file is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
++ MA 02110-1301, USA. */
++
++#include "sysdep.h"
++#include "bfd.h"
++#include "libbfd.h"
++
++#define N(BITS_WORD, BITS_ADDR, NUMBER, PRINT, DEFAULT, NEXT) \
++ { \
++ BITS_WORD, /* Bits in a word. */ \
++ BITS_ADDR, /* Bits in an address. */ \
++ 8, /* Bits in a byte. */ \
++ bfd_arch_sw64, \
++ NUMBER, \
++ "sw64", \
++ PRINT, \
++ 3, /* Section alignment power. */ \
++ DEFAULT, \
++ bfd_default_compatible, \
++ bfd_default_scan, \
++ bfd_arch_default_fill, \
++ NEXT, \
++ 0 /* Maximum offset of a reloc from the start of an insn. */ \
++ }
++
++#define NN(index) (&arch_info_struct[index])
++
++/* These exist only so that we can reasonably disassemble PALcode. */
++static const bfd_arch_info_type arch_info_struct[] =
++{
++ N (64, 64, bfd_mach_sw64, "sw64", false, NN(1)),
++ N (64, 64, bfd_mach_sw64_sw6b, "sw64:sw6b", false, NN(2)),
++ N (64, 64, bfd_mach_sw64_sw8a, "sw64:sw8a", false, 0),
++};
++
++const bfd_arch_info_type bfd_sw64_arch =
++ N (64, 64, 0, "sw64", true, NN(0));
diff --git a/gdb-14.1-add-support-for-SW64-006.patch b/gdb-14.1-add-support-for-SW64-006.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3dc76ccef1469741e206867c4a095021d3a5fbe2
--- /dev/null
+++ b/gdb-14.1-add-support-for-SW64-006.patch
@@ -0,0 +1,5761 @@
+diff -Naur gdb-14.1-after-patch/bfd/ecoff.c gdb-14.1-sw64/bfd/ecoff.c
+--- gdb-14.1-after-patch/bfd/ecoff.c 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/bfd/ecoff.c 2025-03-03 10:59:12.970000000 +0800
+@@ -222,6 +222,11 @@
+ mach = 0;
+ break;
+
++ case SW64_MAGIC:
++ arch = bfd_arch_sw64;
++ mach = 0;
++ break;
++
+ default:
+ arch = bfd_arch_obscure;
+ mach = 0;
+@@ -275,6 +280,9 @@
+ case bfd_arch_alpha:
+ return ALPHA_MAGIC;
+
++ case bfd_arch_sw64:
++ return SW64_MAGIC;
++
+ default:
+ abort ();
+ return 0;
+diff -Naur gdb-14.1-after-patch/bfd/elf64-sw64.c gdb-14.1-sw64/bfd/elf64-sw64.c
+--- gdb-14.1-after-patch/bfd/elf64-sw64.c 1970-01-01 08:00:00.000000000 +0800
++++ gdb-14.1-sw64/bfd/elf64-sw64.c 2025-03-03 10:59:12.990000000 +0800
+@@ -0,0 +1,5531 @@
++/* SW64 specific support for 64-bit ELF
++ Copyright (C) 1996-2023 Free Software Foundation, Inc.
++ Contributed by Richard Henderson .
++
++ This file is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
++ MA 02110-1301, USA. */
++
++
++/* We need a published ABI spec for this. Until one comes out, don't
++ assume this'll remain unchanged forever. */
++
++#include "sysdep.h"
++#include "bfd.h"
++#include "libbfd.h"
++#include "elf-bfd.h"
++#include "ecoff-bfd.h"
++
++#include "elf/sw64.h"
++
++#define SW64ECOFF
++
++#define NO_COFF_RELOCS
++#define NO_COFF_SYMBOLS
++#define NO_COFF_LINENOS
++
++/* Get the ECOFF swapping routines. Needed for the debug information. */
++#include "coff/internal.h"
++#include "coff/sym.h"
++#include "coff/symconst.h"
++#include "coff/ecoff.h"
++#include "coff/sw64.h"
++#include "aout/ar.h"
++#include "libcoff.h"
++#include "libecoff.h"
++#define ECOFF_64
++#include "ecoffswap.h"
++
++
++/* Instruction data for plt generation and relaxation. */
++
++#define OP_LDA 0x08U
++#define OP_LDAH 0x09U
++#define OP_LDQ 0x29U
++#define OP_BR 0x30U
++#define OP_BSR 0x34U
++
++#define INSN_LDA (OP_LDA << 26)
++#define INSN_LDAH (OP_LDAH << 26)
++#define INSN_LDQ (OP_LDQ << 26)
++#define INSN_BR (OP_BR << 26)
++
++#define INSN_ADDQ 0x40000400
++#define INSN_RDUNIQ 0x0000009e
++#define INSN_SUBQ 0x40000520
++#define INSN_S4SUBQ 0x40000560
++#define INSN_UNOP 0x2ffe0000
++
++#define INSN_JSR 0x68004000
++#define INSN_JMP 0x68000000
++#define INSN_JSR_MASK 0xfc00c000
++
++#define INSN_A(I,A) (I | ((unsigned) A << 21))
++#define INSN_AB(I,A,B) (INSN_A (I, A) | (B << 16))
++#define INSN_ABC(I,A,B,C) (INSN_A (I, A) | (B << 16) | C)
++#define INSN_ABO(I,A,B,O) (INSN_A (I, A) | (B << 16) | ((O) & 0xffff))
++#define INSN_AD(I,A,D) (INSN_A (I, A) | (((D) >> 2) & 0x1fffff))
++
++/* PLT/GOT Stuff */
++
++/* Set by ld emulation. Putting this into the link_info or hash structure
++ is simply working too hard. */
++#ifdef USE_SECUREPLT
++bool elf64_sw64_use_secureplt = true;
++#else
++bool elf64_sw64_use_secureplt = false;
++#endif
++
++#define OLD_PLT_HEADER_SIZE 32
++#define OLD_PLT_ENTRY_SIZE 12
++#define NEW_PLT_HEADER_SIZE 36
++#define NEW_PLT_ENTRY_SIZE 4
++
++#define PLT_HEADER_SIZE \
++ (elf64_sw64_use_secureplt ? NEW_PLT_HEADER_SIZE : OLD_PLT_HEADER_SIZE)
++#define PLT_ENTRY_SIZE \
++ (elf64_sw64_use_secureplt ? NEW_PLT_ENTRY_SIZE : OLD_PLT_ENTRY_SIZE)
++
++#define MAX_GOT_SIZE (64*1024)
++
++#define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so"
++
++
++/* Used to implement multiple .got subsections. */
++struct sw64_elf_got_entry
++{
++ struct sw64_elf_got_entry *next;
++
++ /* Which .got subsection? */
++ bfd *gotobj;
++
++ /* The addend in effect for this entry. */
++ bfd_vma addend;
++
++ /* The .got offset for this entry. */
++ int got_offset;
++
++ /* The .plt offset for this entry. */
++ int plt_offset;
++
++ /* How many references to this entry? */
++ int use_count;
++
++ /* The relocation type of this entry. */
++ unsigned char reloc_type;
++
++ /* How a LITERAL is used. */
++ unsigned char flags;
++
++ /* Have we initialized the dynamic relocation for this entry? */
++ unsigned char reloc_done;
++
++ /* Have we adjusted this entry for SEC_MERGE? */
++ unsigned char reloc_xlated;
++};
++
++struct sw64_elf_reloc_entry
++{
++ struct sw64_elf_reloc_entry *next;
++
++ /* Which .reloc section? */
++ asection *srel;
++
++ /* Which section this relocation is against? */
++ asection *sec;
++
++ /* How many did we find? */
++ unsigned long count;
++
++ /* What kind of relocation? */
++ unsigned int rtype;
++};
++
++struct sw64_elf_link_hash_entry
++{
++ struct elf_link_hash_entry root;
++
++ /* External symbol information. */
++ EXTR esym;
++
++ /* Cumulative flags for all the .got entries. */
++ int flags;
++
++ /* Contexts in which a literal was referenced. */
++#define SW64_ELF_LINK_HASH_LU_ADDR 0x01
++#define SW64_ELF_LINK_HASH_LU_MEM 0x02
++#define SW64_ELF_LINK_HASH_LU_BYTE 0x04
++#define SW64_ELF_LINK_HASH_LU_JSR 0x08
++#define SW64_ELF_LINK_HASH_LU_TLSGD 0x10
++#define SW64_ELF_LINK_HASH_LU_TLSLDM 0x20
++#define SW64_ELF_LINK_HASH_LU_JSRDIRECT 0x40
++#define SW64_ELF_LINK_HASH_LU_PLT 0x38
++#define SW64_ELF_LINK_HASH_TLS_IE 0x80
++
++ /* Used to implement multiple .got subsections. */
++ struct sw64_elf_got_entry *got_entries;
++
++ /* Used to count non-got, non-plt relocations for delayed sizing
++ of relocation sections. */
++ struct sw64_elf_reloc_entry *reloc_entries;
++};
++
++/* SW64 ELF linker hash table. */
++
++struct sw64_elf_link_hash_table
++{
++ struct elf_link_hash_table root;
++
++ /* The head of a list of .got subsections linked through
++ sw64_elf_tdata(abfd)->got_link_next. */
++ bfd *got_list;
++
++ /* The most recent relax pass that we've seen. The GOTs
++ should be regenerated if this doesn't match. */
++ int relax_trip;
++};
++
++/* Look up an entry in a SW64 ELF linker hash table. */
++
++#define sw64_elf_link_hash_lookup(table, string, create, copy, follow) \
++ ((struct sw64_elf_link_hash_entry *) \
++ elf_link_hash_lookup (&(table)->root, (string), (create), \
++ (copy), (follow)))
++
++/* Traverse a SW64 ELF linker hash table. */
++
++#define sw64_elf_link_hash_traverse(table, func, info) \
++ (elf_link_hash_traverse \
++ (&(table)->root, \
++ (bool (*) (struct elf_link_hash_entry *, void *)) (func), \
++ (info)))
++
++/* Get the SW64 ELF linker hash table from a link_info structure. */
++
++#define sw64_elf_hash_table(p) \
++ ((is_elf_hash_table ((p)->hash) \
++ && elf_hash_table_id (elf_hash_table (p)) == SW64_ELF_DATA) \
++ ? (struct sw64_elf_link_hash_table *) (p)->hash : NULL)
++
++/* Get the object's symbols as our own entry type. */
++
++#define sw64_elf_sym_hashes(abfd) \
++ ((struct sw64_elf_link_hash_entry **)elf_sym_hashes(abfd))
++
++/* Should we do dynamic things to this symbol? This differs from the
++ generic version in that we never need to consider function pointer
++ equality wrt PLT entries -- we don't create a PLT entry if a symbol's
++ address is ever taken. */
++
++static inline bool
++sw64_elf_dynamic_symbol_p (struct elf_link_hash_entry *h,
++ struct bfd_link_info *info)
++{
++ return _bfd_elf_dynamic_symbol_p (h, info, 0);
++}
++
++/* Create an entry in a SW64 ELF linker hash table. */
++
++static struct bfd_hash_entry *
++elf64_sw64_link_hash_newfunc (struct bfd_hash_entry *entry,
++ struct bfd_hash_table *table,
++ const char *string)
++{
++ struct sw64_elf_link_hash_entry *ret =
++ (struct sw64_elf_link_hash_entry *) entry;
++
++ /* Allocate the structure if it has not already been allocated by a
++ subclass. */
++ if (ret == (struct sw64_elf_link_hash_entry *) NULL)
++ ret = ((struct sw64_elf_link_hash_entry *)
++ bfd_hash_allocate (table,
++ sizeof (struct sw64_elf_link_hash_entry)));
++ if (ret == (struct sw64_elf_link_hash_entry *) NULL)
++ return (struct bfd_hash_entry *) ret;
++
++ /* Call the allocation method of the superclass. */
++ ret = ((struct sw64_elf_link_hash_entry *)
++ _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
++ table, string));
++ if (ret != (struct sw64_elf_link_hash_entry *) NULL)
++ {
++ /* Set local fields. */
++ memset (&ret->esym, 0, sizeof (EXTR));
++ /* We use -2 as a marker to indicate that the information has
++ not been set. -1 means there is no associated ifd. */
++ ret->esym.ifd = -2;
++ ret->flags = 0;
++ ret->got_entries = NULL;
++ ret->reloc_entries = NULL;
++ }
++
++ return (struct bfd_hash_entry *) ret;
++}
++
++/* Create a SW64 ELF linker hash table. */
++
++static struct bfd_link_hash_table *
++elf64_sw64_bfd_link_hash_table_create (bfd *abfd)
++{
++ struct sw64_elf_link_hash_table *ret;
++ size_t amt = sizeof (struct sw64_elf_link_hash_table);
++
++ ret = (struct sw64_elf_link_hash_table *) bfd_zmalloc (amt);
++ if (ret == (struct sw64_elf_link_hash_table *) NULL)
++ return NULL;
++
++ if (!_bfd_elf_link_hash_table_init (&ret->root, abfd,
++ elf64_sw64_link_hash_newfunc,
++ sizeof (struct sw64_elf_link_hash_entry),
++ SW64_ELF_DATA))
++ {
++ free (ret);
++ return NULL;
++ }
++
++ return &ret->root.root;
++}
++
++/* SW64 ELF follows MIPS ELF in using a special find_nearest_line
++ routine in order to handle the ECOFF debugging information. */
++
++struct sw64_elf_find_line
++{
++ struct ecoff_debug_info d;
++ struct ecoff_find_line i;
++};
++
++/* We have some private fields hanging off of the elf_tdata structure. */
++
++struct sw64_elf_obj_tdata
++{
++ struct elf_obj_tdata root;
++
++ /* For every input file, these are the got entries for that object's
++ local symbols. */
++ struct sw64_elf_got_entry ** local_got_entries;
++
++ /* For every input file, this is the object that owns the got that
++ this input file uses. */
++ bfd *gotobj;
++
++ /* For every got, this is a linked list through the objects using this got */
++ bfd *in_got_link_next;
++
++ /* For every got, this is a link to the next got subsegment. */
++ bfd *got_link_next;
++
++ /* For every got, this is the section. */
++ asection *got;
++
++ /* For every got, this is it's total number of words. */
++ int total_got_size;
++
++ /* For every got, this is the sum of the number of words required
++ to hold all of the member object's local got. */
++ int local_got_size;
++
++ /* Used by elf64_sw64_find_nearest_line entry point. */
++ struct sw64_elf_find_line *find_line_info;
++
++};
++
++#define sw64_elf_tdata(abfd) \
++ ((struct sw64_elf_obj_tdata *) (abfd)->tdata.any)
++
++#define is_sw64_elf(bfd) \
++ (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
++ && elf_tdata (bfd) != NULL \
++ && elf_object_id (bfd) == SW64_ELF_DATA)
++
++static bool
++elf64_sw64_mkobject (bfd *abfd)
++{
++ return bfd_elf_allocate_object (abfd, sizeof (struct sw64_elf_obj_tdata),
++ SW64_ELF_DATA);
++}
++
++static bool
++elf64_sw64_object_p (bfd *abfd)
++{
++ /* Set the right machine number for an SW64 ELF file. */
++ return bfd_default_set_arch_mach (abfd, bfd_arch_sw64, 0);
++}
++
++/* A relocation function which doesn't do anything. */
++
++static bfd_reloc_status_type
++elf64_sw64_reloc_nil (bfd *abfd ATTRIBUTE_UNUSED, arelent *reloc,
++ asymbol *sym ATTRIBUTE_UNUSED,
++ void * data ATTRIBUTE_UNUSED, asection *sec,
++ bfd *output_bfd, char **error_message ATTRIBUTE_UNUSED)
++{
++ if (output_bfd)
++ reloc->address += sec->output_offset;
++ return bfd_reloc_ok;
++}
++
++/* A relocation function used for an unsupported reloc. */
++
++static bfd_reloc_status_type
++elf64_sw64_reloc_bad (bfd *abfd ATTRIBUTE_UNUSED, arelent *reloc,
++ asymbol *sym ATTRIBUTE_UNUSED,
++ void * data ATTRIBUTE_UNUSED, asection *sec,
++ bfd *output_bfd, char **error_message ATTRIBUTE_UNUSED)
++{
++ if (output_bfd)
++ reloc->address += sec->output_offset;
++ return bfd_reloc_notsupported;
++}
++
++/* Do the work of the GPDISP relocation. */
++
++static bfd_reloc_status_type
++elf64_sw64_do_reloc_gpdisp (bfd *abfd, bfd_vma gpdisp, bfd_byte *p_ldah,
++ bfd_byte *p_lda)
++{
++ bfd_reloc_status_type ret = bfd_reloc_ok;
++ bfd_vma addend;
++ unsigned long i_ldah, i_lda;
++
++ i_ldah = bfd_get_32 (abfd, p_ldah);
++ i_lda = bfd_get_32 (abfd, p_lda);
++
++ /* Complain if the instructions are not correct. */
++ if (((i_ldah >> 26) & 0x3f) != 0x09
++ || ((i_lda >> 26) & 0x3f) != 0x08)
++ ret = bfd_reloc_dangerous;
++
++ /* Extract the user-supplied offset, mirroring the sign extensions
++ that the instructions perform. */
++ addend = ((i_ldah & 0xffff) << 16) | (i_lda & 0xffff);
++ addend = (addend ^ 0x80008000) - 0x80008000;
++
++ gpdisp += addend;
++
++ if ((bfd_signed_vma) gpdisp < -(bfd_signed_vma) 0x80000000
++ || (bfd_signed_vma) gpdisp >= (bfd_signed_vma) 0x7fff8000)
++ ret = bfd_reloc_overflow;
++
++ /* compensate for the sign extension again. */
++ i_ldah = ((i_ldah & 0xffff0000)
++ | (((gpdisp >> 16) + ((gpdisp >> 15) & 1)) & 0xffff));
++ i_lda = (i_lda & 0xffff0000) | (gpdisp & 0xffff);
++
++ bfd_put_32 (abfd, (bfd_vma) i_ldah, p_ldah);
++ bfd_put_32 (abfd, (bfd_vma) i_lda, p_lda);
++
++ return ret;
++}
++
++/* The special function for the GPDISP reloc. */
++
++static bfd_reloc_status_type
++elf64_sw64_reloc_gpdisp (bfd *abfd, arelent *reloc_entry,
++ asymbol *sym ATTRIBUTE_UNUSED, void * data,
++ asection *input_section, bfd *output_bfd,
++ char **err_msg)
++{
++ bfd_reloc_status_type ret;
++ bfd_vma gp, relocation;
++ bfd_vma high_address;
++ bfd_byte *p_ldah, *p_lda;
++
++ /* Don't do anything if we're not doing a final link. */
++ if (output_bfd)
++ {
++ reloc_entry->address += input_section->output_offset;
++ return bfd_reloc_ok;
++ }
++
++ high_address = bfd_get_section_limit (abfd, input_section);
++ if (reloc_entry->address > high_address
++ || reloc_entry->address + reloc_entry->addend > high_address)
++ return bfd_reloc_outofrange;
++
++ /* The gp used in the portion of the output object to which this
++ input object belongs is cached on the input bfd. */
++ gp = _bfd_get_gp_value (abfd);
++
++ relocation = (input_section->output_section->vma
++ + input_section->output_offset
++ + reloc_entry->address);
++
++ p_ldah = (bfd_byte *) data + reloc_entry->address;
++ p_lda = p_ldah + reloc_entry->addend;
++
++ ret = elf64_sw64_do_reloc_gpdisp (abfd, gp - relocation, p_ldah, p_lda);
++
++ /* Complain if the instructions are not correct. */
++ if (ret == bfd_reloc_dangerous)
++ *err_msg = _("GPDISP relocation did not find ldah and lda instructions");
++
++ return ret;
++}
++
++/* In case we're on a 32-bit machine, construct a 64-bit "-1" value
++ from smaller values. Start with zero, widen, *then* decrement. */
++#define MINUS_ONE (((bfd_vma)0) - 1)
++
++
++#define SKIP_HOWTO(N) \
++ HOWTO(N, 0, 0, 0, 0, 0, complain_overflow_dont, elf64_sw64_reloc_bad, 0, 0, 0, 0, 0)
++
++static reloc_howto_type elf64_sw64_howto_table[] =
++{
++ HOWTO (R_SW64_NONE, /* type */
++ 0, /* rightshift */
++ 0, /* size */
++ 0, /* bitsize */
++ true, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ elf64_sw64_reloc_nil, /* special_function */
++ "NONE", /* name */
++ false, /* partial_inplace */
++ 0, /* src_mask */
++ 0, /* dst_mask */
++ true), /* pcrel_offset */
++
++ /* A 32 bit reference to a symbol. */
++ HOWTO (R_SW64_REFLONG, /* type */
++ 0, /* rightshift */
++ 4, /* size */
++ 32, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_bitfield, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "REFLONG", /* name */
++ false, /* partial_inplace */
++ 0xffffffff, /* src_mask */
++ 0xffffffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* A 64 bit reference to a symbol. */
++ HOWTO (R_SW64_REFQUAD, /* type */
++ 0, /* rightshift */
++ 8, /* size */
++ 64, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_bitfield, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "REFQUAD", /* name */
++ false, /* partial_inplace */
++ MINUS_ONE, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* A 32 bit GP relative offset. This is just like REFLONG except
++ that when the value is used the value of the gp register will be
++ added in. */
++ HOWTO (R_SW64_GPREL32, /* type */
++ 0, /* rightshift */
++ 4, /* size */
++ 32, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_bitfield, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "GPREL32", /* name */
++ false, /* partial_inplace */
++ 0xffffffff, /* src_mask */
++ 0xffffffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* Used for an instruction that refers to memory off the GP register. */
++ HOWTO (R_SW64_LITERAL, /* type */
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "ELF_LITERAL", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* This reloc only appears immediately following an ELF_LITERAL reloc.
++ It identifies a use of the literal. The symbol index is special:
++ 1 means the literal address is in the base register of a memory
++ format instruction; 2 means the literal address is in the byte
++ offset register of a byte-manipulation instruction; 3 means the
++ literal address is in the target register of a jsr instruction.
++ This does not actually do any relocation. */
++ HOWTO (R_SW64_LITUSE, /* type */
++ 0, /* rightshift */
++ 2, /* size */
++ 32, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ elf64_sw64_reloc_nil, /* special_function */
++ "LITUSE", /* name */
++ false, /* partial_inplace */
++ 0, /* src_mask */
++ 0, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* Load the gp register. This is always used for a ldah instruction
++ which loads the upper 16 bits of the gp register. The symbol
++ index of the GPDISP instruction is an offset in bytes to the lda
++ instruction that loads the lower 16 bits. The value to use for
++ the relocation is the difference between the GP value and the
++ current location; the load will always be done against a register
++ holding the current address.
++
++ NOTE: Unlike ECOFF, partial in-place relocation is not done. If
++ any offset is present in the instructions, it is an offset from
++ the register to the ldah instruction. This lets us avoid any
++ stupid hackery like inventing a gp value to do partial relocation
++ against. Also unlike ECOFF, we do the whole relocation off of
++ the GPDISP rather than a GPDISP_HI16/GPDISP_LO16 pair. An odd,
++ space consuming bit, that, since all the information was present
++ in the GPDISP_HI16 reloc. */
++ HOWTO (R_SW64_GPDISP, /* type */
++ 16, /* rightshift */
++ 4, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ elf64_sw64_reloc_gpdisp, /* special_function */
++ "GPDISP", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ true), /* pcrel_offset */
++
++ /* A 21 bit branch. */
++ HOWTO (R_SW64_BRADDR, /* type */
++ 2, /* rightshift */
++ 4, /* size */
++ 21, /* bitsize */
++ true, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "BRADDR", /* name */
++ false, /* partial_inplace */
++ 0x1fffff, /* src_mask */
++ 0x1fffff, /* dst_mask */
++ true), /* pcrel_offset */
++
++ /* A hint for a jump to a register. */
++ HOWTO (R_SW64_HINT, /* type */
++ 2, /* rightshift */
++ 2, /* size */
++ 14, /* bitsize */
++ true, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "HINT", /* name */
++ false, /* partial_inplace */
++ 0x3fff, /* src_mask */
++ 0x3fff, /* dst_mask */
++ true), /* pcrel_offset */
++
++ /* 16 bit PC relative offset. */
++ HOWTO (R_SW64_SREL16, /* type */
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ true, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "SREL16", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ true), /* pcrel_offset */
++
++ /* 32 bit PC relative offset. */
++ HOWTO (R_SW64_SREL32, /* type */
++ 0, /* rightshift */
++ 4, /* size */
++ 32, /* bitsize */
++ true, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "SREL32", /* name */
++ false, /* partial_inplace */
++ 0xffffffff, /* src_mask */
++ 0xffffffff, /* dst_mask */
++ true), /* pcrel_offset */
++
++ /* A 64 bit PC relative offset. */
++ HOWTO (R_SW64_SREL64, /* type */
++ 0, /* rightshift */
++ 8, /* size */
++ 64, /* bitsize */
++ true, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "SREL64", /* name */
++ false, /* partial_inplace */
++ MINUS_ONE, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ true), /* pcrel_offset */
++
++ /* Skip 12 - 16; deprecated ECOFF relocs. */
++ SKIP_HOWTO (12),
++ SKIP_HOWTO (13),
++ SKIP_HOWTO (14),
++ SKIP_HOWTO (15),
++ SKIP_HOWTO (16),
++
++ /* The high 16 bits of the displacement from GP to the target. */
++ HOWTO (R_SW64_GPRELHIGH,
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "GPRELHIGH", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* The low 16 bits of the displacement from GP to the target. */
++ HOWTO (R_SW64_GPRELLOW,
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "GPRELLOW", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* A 16-bit displacement from the GP to the target. */
++ HOWTO (R_SW64_GPREL16,
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "GPREL16", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* Skip 20 - 23; deprecated ECOFF relocs. */
++ SKIP_HOWTO (20),
++ SKIP_HOWTO (21),
++ SKIP_HOWTO (22),
++ SKIP_HOWTO (23),
++
++ /* Misc ELF relocations. */
++
++ /* A dynamic relocation to copy the target into our .dynbss section. */
++ /* Not generated, as all SW64 objects use PIC, so it is not needed. It
++ is present because every other ELF has one, but should not be used
++ because .dynbss is an ugly thing. */
++ HOWTO (R_SW64_COPY,
++ 0,
++ 0,
++ 0,
++ false,
++ 0,
++ complain_overflow_dont,
++ bfd_elf_generic_reloc,
++ "COPY",
++ false,
++ 0,
++ 0,
++ true),
++
++ /* A dynamic relocation for a .got entry. */
++ HOWTO (R_SW64_GLOB_DAT,
++ 0,
++ 0,
++ 0,
++ false,
++ 0,
++ complain_overflow_dont,
++ bfd_elf_generic_reloc,
++ "GLOB_DAT",
++ false,
++ 0,
++ 0,
++ true),
++
++ /* A dynamic relocation for a .plt entry. */
++ HOWTO (R_SW64_JMP_SLOT,
++ 0,
++ 0,
++ 0,
++ false,
++ 0,
++ complain_overflow_dont,
++ bfd_elf_generic_reloc,
++ "JMP_SLOT",
++ false,
++ 0,
++ 0,
++ true),
++
++ /* A dynamic relocation to add the base of the DSO to a 64-bit field. */
++ HOWTO (R_SW64_RELATIVE,
++ 0,
++ 0,
++ 0,
++ false,
++ 0,
++ complain_overflow_dont,
++ bfd_elf_generic_reloc,
++ "RELATIVE",
++ false,
++ 0,
++ 0,
++ true),
++
++ /* A 21 bit branch that adjusts for gp loads. */
++ HOWTO (R_SW64_BRSGP, /* type */
++ 2, /* rightshift */
++ 4, /* size */
++ 21, /* bitsize */
++ true, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "BRSGP", /* name */
++ false, /* partial_inplace */
++ 0x1fffff, /* src_mask */
++ 0x1fffff, /* dst_mask */
++ true), /* pcrel_offset */
++
++ /* Creates a tls_index for the symbol in the got. */
++ HOWTO (R_SW64_TLSGD, /* type */
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "TLSGD", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* Creates a tls_index for the (current) module in the got. */
++ HOWTO (R_SW64_TLSLDM, /* type */
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "TLSLDM", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* A dynamic relocation for a DTP module entry. */
++ HOWTO (R_SW64_DTPMOD64, /* type */
++ 0, /* rightshift */
++ 8, /* size */
++ 64, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_bitfield, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "DTPMOD64", /* name */
++ false, /* partial_inplace */
++ MINUS_ONE, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* Creates a 64-bit offset in the got for the displacement
++ from DTP to the target. */
++ HOWTO (R_SW64_GOTDTPREL, /* type */
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "GOTDTPREL", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* A dynamic relocation for a displacement from DTP to the target. */
++ HOWTO (R_SW64_DTPREL64, /* type */
++ 0, /* rightshift */
++ 8, /* size */
++ 64, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_bitfield, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "DTPREL64", /* name */
++ false, /* partial_inplace */
++ MINUS_ONE, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* The high 16 bits of the displacement from DTP to the target. */
++ HOWTO (R_SW64_DTPRELHI, /* type */
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "DTPRELHI", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* The low 16 bits of the displacement from DTP to the target. */
++ HOWTO (R_SW64_DTPRELLO, /* type */
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "DTPRELLO", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* A 16-bit displacement from DTP to the target. */
++ HOWTO (R_SW64_DTPREL16, /* type */
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "DTPREL16", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* Creates a 64-bit offset in the got for the displacement
++ from TP to the target. */
++ HOWTO (R_SW64_GOTTPREL, /* type */
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "GOTTPREL", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* A dynamic relocation for a displacement from TP to the target. */
++ HOWTO (R_SW64_TPREL64, /* type */
++ 0, /* rightshift */
++ 8, /* size */
++ 64, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_bitfield, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "TPREL64", /* name */
++ false, /* partial_inplace */
++ MINUS_ONE, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* The high 16 bits of the displacement from TP to the target. */
++ HOWTO (R_SW64_TPRELHI, /* type */
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "TPRELHI", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* The low 16 bits of the displacement from TP to the target. */
++ HOWTO (R_SW64_TPRELLO, /* type */
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "TPRELLO", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++
++ /* A 16-bit displacement from TP to the target. */
++ HOWTO (R_SW64_TPREL16, /* type */
++ 0, /* rightshift */
++ 2, /* size */
++ 16, /* bitsize */
++ false, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "TPREL16", /* name */
++ false, /* partial_inplace */
++ 0xffff, /* src_mask */
++ 0xffff, /* dst_mask */
++ false), /* pcrel_offset */
++};
++
++/* A mapping from BFD reloc types to SW64 ELF reloc types. */
++
++struct elf_reloc_map
++{
++ bfd_reloc_code_real_type bfd_reloc_val;
++ int elf_reloc_val;
++};
++
++static const struct elf_reloc_map elf64_sw64_reloc_map[] =
++{
++ {BFD_RELOC_NONE, R_SW64_NONE},
++ {BFD_RELOC_32, R_SW64_REFLONG},
++ {BFD_RELOC_64, R_SW64_REFQUAD},
++ {BFD_RELOC_CTOR, R_SW64_REFQUAD},
++ {BFD_RELOC_GPREL32, R_SW64_GPREL32},
++ {BFD_RELOC_SW64_ELF_LITERAL, R_SW64_LITERAL},
++ {BFD_RELOC_SW64_LITUSE, R_SW64_LITUSE},
++ {BFD_RELOC_SW64_GPDISP, R_SW64_GPDISP},
++ {BFD_RELOC_23_PCREL_S2, R_SW64_BRADDR},
++ {BFD_RELOC_SW64_HINT, R_SW64_HINT},
++ {BFD_RELOC_16_PCREL, R_SW64_SREL16},
++ {BFD_RELOC_32_PCREL, R_SW64_SREL32},
++ {BFD_RELOC_64_PCREL, R_SW64_SREL64},
++ {BFD_RELOC_SW64_GPREL_HI16, R_SW64_GPRELHIGH},
++ {BFD_RELOC_SW64_GPREL_LO16, R_SW64_GPRELLOW},
++ {BFD_RELOC_GPREL16, R_SW64_GPREL16},
++ {BFD_RELOC_SW64_BRSGP, R_SW64_BRSGP},
++ {BFD_RELOC_SW64_TLSGD, R_SW64_TLSGD},
++ {BFD_RELOC_SW64_TLSLDM, R_SW64_TLSLDM},
++ {BFD_RELOC_SW64_DTPMOD64, R_SW64_DTPMOD64},
++ {BFD_RELOC_SW64_GOTDTPREL16, R_SW64_GOTDTPREL},
++ {BFD_RELOC_SW64_DTPREL64, R_SW64_DTPREL64},
++ {BFD_RELOC_SW64_DTPREL_HI16, R_SW64_DTPRELHI},
++ {BFD_RELOC_SW64_DTPREL_LO16, R_SW64_DTPRELLO},
++ {BFD_RELOC_SW64_DTPREL16, R_SW64_DTPREL16},
++ {BFD_RELOC_SW64_GOTTPREL16, R_SW64_GOTTPREL},
++ {BFD_RELOC_SW64_TPREL64, R_SW64_TPREL64},
++ {BFD_RELOC_SW64_TPREL_HI16, R_SW64_TPRELHI},
++ {BFD_RELOC_SW64_TPREL_LO16, R_SW64_TPRELLO},
++ {BFD_RELOC_SW64_TPREL16, R_SW64_TPREL16},
++};
++
++/* Given a BFD reloc type, return a HOWTO structure. */
++
++static reloc_howto_type *
++elf64_sw64_bfd_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
++ bfd_reloc_code_real_type code)
++{
++ const struct elf_reloc_map *i, *e;
++ i = e = elf64_sw64_reloc_map;
++ e += sizeof (elf64_sw64_reloc_map) / sizeof (struct elf_reloc_map);
++ for (; i != e; ++i)
++ {
++ if (i->bfd_reloc_val == code)
++ return &elf64_sw64_howto_table[i->elf_reloc_val];
++ }
++ return 0;
++}
++
++static reloc_howto_type *
++elf64_sw64_bfd_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
++ const char *r_name)
++{
++ unsigned int i;
++
++ for (i = 0;
++ i < (sizeof (elf64_sw64_howto_table)
++ / sizeof (elf64_sw64_howto_table[0]));
++ i++)
++ if (elf64_sw64_howto_table[i].name != NULL
++ && strcasecmp (elf64_sw64_howto_table[i].name, r_name) == 0)
++ return &elf64_sw64_howto_table[i];
++
++ return NULL;
++}
++
++/* Given an SW64 ELF reloc type, fill in an arelent structure. */
++
++static bool
++elf64_sw64_info_to_howto (bfd *abfd, arelent *cache_ptr,
++ Elf_Internal_Rela *dst)
++{
++ unsigned r_type = ELF64_R_TYPE(dst->r_info);
++
++ if (r_type >= R_SW64_max)
++ {
++ /* xgettext:c-format */
++ _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
++ abfd, r_type);
++ bfd_set_error (bfd_error_bad_value);
++ return false;
++ }
++ cache_ptr->howto = &elf64_sw64_howto_table[r_type];
++ return true;
++}
++
++/* These two relocations create a two-word entry in the got. */
++#define sw64_got_entry_size(r_type) \
++ (r_type == R_SW64_TLSGD || r_type == R_SW64_TLSLDM ? 16 : 8)
++
++/* This is PT_TLS segment p_vaddr. */
++#define sw64_get_dtprel_base(info) \
++ (elf_hash_table (info)->tls_sec->vma)
++
++/* Main program TLS (whose template starts at PT_TLS p_vaddr)
++ is assigned offset round(16, PT_TLS p_align). */
++#define sw64_get_tprel_base(info) \
++ (elf_hash_table (info)->tls_sec->vma \
++ - align_power ((bfd_vma) 16, \
++ elf_hash_table (info)->tls_sec->alignment_power))
++
++/* Handle an SW64 specific section when reading an object file. This
++ is called when bfd_section_from_shdr finds a section with an unknown
++ type. */
++
++static bool
++elf64_sw64_section_from_shdr (bfd *abfd,
++ Elf_Internal_Shdr *hdr,
++ const char *name,
++ int shindex)
++{
++ asection *newsect;
++
++ /* There ought to be a place to keep ELF backend specific flags, but
++ at the moment there isn't one. We just keep track of the
++ sections by their name, instead. Fortunately, the ABI gives
++ suggested names for all the SW64 specific sections, so we will
++ probably get away with this. */
++ switch (hdr->sh_type)
++ {
++ case SHT_SW64_DEBUG:
++ if (strcmp (name, ".mdebug") != 0)
++ return false;
++ break;
++ default:
++ return false;
++ }
++
++ if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
++ return false;
++ newsect = hdr->bfd_section;
++
++ if (hdr->sh_type == SHT_SW64_DEBUG)
++ {
++ if (!bfd_set_section_flags (newsect,
++ bfd_section_flags (newsect) | SEC_DEBUGGING))
++ return false;
++ }
++
++ return true;
++}
++
++/* Convert SW64 specific section flags to bfd internal section flags. */
++
++static bool
++elf64_sw64_section_flags (const Elf_Internal_Shdr *hdr)
++{
++ if (hdr->sh_flags & SHF_SW64_GPREL)
++ hdr->bfd_section->flags |= SEC_SMALL_DATA;
++
++ return true;
++}
++
++/* Set the correct type for an SW64 ELF section. We do this by the
++ section name, which is a hack, but ought to work. */
++
++static bool
++elf64_sw64_fake_sections (bfd *abfd, Elf_Internal_Shdr *hdr, asection *sec)
++{
++ register const char *name;
++
++ name = bfd_section_name (sec);
++
++ if (strcmp (name, ".mdebug") == 0)
++ {
++ hdr->sh_type = SHT_SW64_DEBUG;
++ /* In a shared object on Irix 5.3, the .mdebug section has an
++ entsize of 0. FIXME: Does this matter? */
++ if ((abfd->flags & DYNAMIC) != 0 )
++ hdr->sh_entsize = 0;
++ else
++ hdr->sh_entsize = 1;
++ }
++ else if ((sec->flags & SEC_SMALL_DATA)
++ || strcmp (name, ".sdata") == 0
++ || strcmp (name, ".sbss") == 0
++ || strcmp (name, ".lit4") == 0
++ || strcmp (name, ".lit8") == 0)
++ hdr->sh_flags |= SHF_SW64_GPREL;
++
++ return true;
++}
++
++/* Hook called by the linker routine which adds symbols from an object
++ file. We use it to put .comm items in .sbss, and not .bss. */
++
++static bool
++elf64_sw64_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
++ Elf_Internal_Sym *sym,
++ const char **namep ATTRIBUTE_UNUSED,
++ flagword *flagsp ATTRIBUTE_UNUSED,
++ asection **secp, bfd_vma *valp)
++{
++ if (sym->st_shndx == SHN_COMMON
++ && !bfd_link_relocatable (info)
++ && sym->st_size <= elf_gp_size (abfd))
++ {
++ /* Common symbols less than or equal to -G nn bytes are
++ automatically put into .sbss. */
++
++ asection *scomm = bfd_get_section_by_name (abfd, ".scommon");
++
++ if (scomm == NULL)
++ {
++ scomm = bfd_make_section_with_flags (abfd, ".scommon",
++ (SEC_ALLOC
++ | SEC_IS_COMMON
++ | SEC_SMALL_DATA
++ | SEC_LINKER_CREATED));
++ if (scomm == NULL)
++ return false;
++ }
++
++ *secp = scomm;
++ *valp = sym->st_size;
++ }
++
++ return true;
++}
++
++/* Create the .got section. */
++
++static bool
++elf64_sw64_create_got_section (bfd *abfd,
++ struct bfd_link_info *info ATTRIBUTE_UNUSED)
++{
++ flagword flags;
++ asection *s;
++
++ if (! is_sw64_elf (abfd))
++ return false;
++
++ flags = (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY
++ | SEC_LINKER_CREATED);
++ s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
++ if (s == NULL
++ || !bfd_set_section_alignment (s, 3))
++ return false;
++
++ sw64_elf_tdata (abfd)->got = s;
++
++ /* Make sure the object's gotobj is set to itself so that we default
++ to every object with its own .got. We'll merge .gots later once
++ we've collected each object's info. */
++ sw64_elf_tdata (abfd)->gotobj = abfd;
++
++ return true;
++}
++
++/* Create all the dynamic sections. */
++
++static bool
++elf64_sw64_create_dynamic_sections (bfd *abfd, struct bfd_link_info *info)
++{
++ asection *s;
++ flagword flags;
++ struct elf_link_hash_entry *h;
++
++ if (! is_sw64_elf (abfd))
++ return false;
++
++ /* We need to create .plt, .rela.plt, .got, and .rela.got sections. */
++
++ flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS | SEC_IN_MEMORY
++ | SEC_LINKER_CREATED
++ | (elf64_sw64_use_secureplt ? SEC_READONLY : 0));
++ s = bfd_make_section_anyway_with_flags (abfd, ".plt", flags);
++ elf_hash_table (info)->splt = s;
++ if (s == NULL || ! bfd_set_section_alignment (s, 4))
++ return false;
++
++ /* Define the symbol _PROCEDURE_LINKAGE_TABLE_ at the start of the
++ .plt section. */
++ h = _bfd_elf_define_linkage_sym (abfd, info, s,
++ "_PROCEDURE_LINKAGE_TABLE_");
++ elf_hash_table (info)->hplt = h;
++ if (h == NULL)
++ return false;
++
++ flags = (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY
++ | SEC_LINKER_CREATED | SEC_READONLY);
++ s = bfd_make_section_anyway_with_flags (abfd, ".rela.plt", flags);
++ elf_hash_table (info)->srelplt = s;
++ if (s == NULL || ! bfd_set_section_alignment (s, 3))
++ return false;
++
++ if (elf64_sw64_use_secureplt)
++ {
++ flags = SEC_ALLOC | SEC_LINKER_CREATED;
++ s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
++ elf_hash_table (info)->sgotplt = s;
++ if (s == NULL || ! bfd_set_section_alignment (s, 3))
++ return false;
++ }
++
++ /* We may or may not have created a .got section for this object, but
++ we definitely haven't done the rest of the work. */
++
++ if (sw64_elf_tdata(abfd)->gotobj == NULL)
++ {
++ if (!elf64_sw64_create_got_section (abfd, info))
++ return false;
++ }
++
++ flags = (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY
++ | SEC_LINKER_CREATED | SEC_READONLY);
++ s = bfd_make_section_anyway_with_flags (abfd, ".rela.got", flags);
++ elf_hash_table (info)->srelgot = s;
++ if (s == NULL
++ || !bfd_set_section_alignment (s, 3))
++ return false;
++
++ /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
++ dynobj's .got section. We don't do this in the linker script
++ because we don't want to define the symbol if we are not creating
++ a global offset table. */
++ h = _bfd_elf_define_linkage_sym (abfd, info, sw64_elf_tdata(abfd)->got,
++ "_GLOBAL_OFFSET_TABLE_");
++ elf_hash_table (info)->hgot = h;
++ if (h == NULL)
++ return false;
++
++ return true;
++}
++
++/* Read ECOFF debugging information from a .mdebug section into a
++ ecoff_debug_info structure. */
++
++static bool
++elf64_sw64_read_ecoff_info (bfd *abfd, asection *section,
++ struct ecoff_debug_info *debug)
++{
++ HDRR *symhdr;
++ const struct ecoff_debug_swap *swap;
++ char *ext_hdr = NULL;
++
++ swap = get_elf_backend_data (abfd)->elf_backend_ecoff_debug_swap;
++ memset (debug, 0, sizeof (*debug));
++
++ ext_hdr = (char *) bfd_malloc (swap->external_hdr_size);
++ if (ext_hdr == NULL && swap->external_hdr_size != 0)
++ goto error_return;
++
++ if (! bfd_get_section_contents (abfd, section, ext_hdr, (file_ptr) 0,
++ swap->external_hdr_size))
++ goto error_return;
++
++ symhdr = &debug->symbolic_header;
++ (*swap->swap_hdr_in) (abfd, ext_hdr, symhdr);
++
++ /* The symbolic header contains absolute file offsets and sizes to
++ read. */
++#define READ(ptr, offset, count, size, type) \
++ do \
++ { \
++ size_t amt; \
++ debug->ptr = NULL; \
++ if (symhdr->count == 0) \
++ break; \
++ if (_bfd_mul_overflow (size, symhdr->count, &amt)) \
++ { \
++ bfd_set_error (bfd_error_file_too_big); \
++ goto error_return; \
++ } \
++ if (bfd_seek (abfd, symhdr->offset, SEEK_SET) != 0) \
++ goto error_return; \
++ debug->ptr = (type) _bfd_malloc_and_read (abfd, amt, amt); \
++ if (debug->ptr == NULL) \
++ goto error_return; \
++ } while (0)
++
++ READ (line, cbLineOffset, cbLine, sizeof (unsigned char), unsigned char *);
++ READ (external_dnr, cbDnOffset, idnMax, swap->external_dnr_size, void *);
++ READ (external_pdr, cbPdOffset, ipdMax, swap->external_pdr_size, void *);
++ READ (external_sym, cbSymOffset, isymMax, swap->external_sym_size, void *);
++ READ (external_opt, cbOptOffset, ioptMax, swap->external_opt_size, void *);
++ READ (external_aux, cbAuxOffset, iauxMax, sizeof (union aux_ext),
++ union aux_ext *);
++ READ (ss, cbSsOffset, issMax, sizeof (char), char *);
++ READ (ssext, cbSsExtOffset, issExtMax, sizeof (char), char *);
++ READ (external_fdr, cbFdOffset, ifdMax, swap->external_fdr_size, void *);
++ READ (external_rfd, cbRfdOffset, crfd, swap->external_rfd_size, void *);
++ READ (external_ext, cbExtOffset, iextMax, swap->external_ext_size, void *);
++#undef READ
++
++ debug->fdr = NULL;
++
++ return true;
++
++ error_return:
++ free (ext_hdr);
++ _bfd_ecoff_free_ecoff_debug_info (debug);
++ return false;
++}
++
++/* SW64 ELF local labels start with '$'. */
++
++static bool
++elf64_sw64_is_local_label_name (bfd *abfd ATTRIBUTE_UNUSED, const char *name)
++{
++ return name[0] == '$';
++}
++
++static bool
++elf64_sw64_find_nearest_line (bfd *abfd, asymbol **symbols,
++ asection *section, bfd_vma offset,
++ const char **filename_ptr,
++ const char **functionname_ptr,
++ unsigned int *line_ptr,
++ unsigned int *discriminator_ptr)
++{
++ asection *msec;
++
++ if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
++ filename_ptr, functionname_ptr,
++ line_ptr, discriminator_ptr,
++ dwarf_debug_sections,
++ &elf_tdata (abfd)->dwarf2_find_line_info)
++ == 1)
++ return true;
++
++ msec = bfd_get_section_by_name (abfd, ".mdebug");
++ if (msec != NULL)
++ {
++ flagword origflags;
++ struct sw64_elf_find_line *fi;
++ const struct ecoff_debug_swap * const swap =
++ get_elf_backend_data (abfd)->elf_backend_ecoff_debug_swap;
++
++ /* If we are called during a link, sw64_elf_final_link may have
++ cleared the SEC_HAS_CONTENTS field. We force it back on here
++ if appropriate (which it normally will be). */
++ origflags = msec->flags;
++ if (elf_section_data (msec)->this_hdr.sh_type != SHT_NOBITS)
++ msec->flags |= SEC_HAS_CONTENTS;
++
++ fi = sw64_elf_tdata (abfd)->find_line_info;
++ if (fi == NULL)
++ {
++ bfd_size_type external_fdr_size;
++ char *fraw_src;
++ char *fraw_end;
++ struct fdr *fdr_ptr;
++ bfd_size_type amt = sizeof (struct sw64_elf_find_line);
++
++ fi = (struct sw64_elf_find_line *) bfd_zalloc (abfd, amt);
++ if (fi == NULL)
++ {
++ msec->flags = origflags;
++ return false;
++ }
++
++ if (!elf64_sw64_read_ecoff_info (abfd, msec, &fi->d))
++ {
++ msec->flags = origflags;
++ return false;
++ }
++
++ /* Swap in the FDR information. */
++ amt = fi->d.symbolic_header.ifdMax * sizeof (struct fdr);
++ fi->d.fdr = (struct fdr *) bfd_alloc (abfd, amt);
++ if (fi->d.fdr == NULL)
++ {
++ msec->flags = origflags;
++ return false;
++ }
++ external_fdr_size = swap->external_fdr_size;
++ fdr_ptr = fi->d.fdr;
++ fraw_src = (char *) fi->d.external_fdr;
++ fraw_end = (fraw_src
++ + fi->d.symbolic_header.ifdMax * external_fdr_size);
++ for (; fraw_src < fraw_end; fraw_src += external_fdr_size, fdr_ptr++)
++ (*swap->swap_fdr_in) (abfd, fraw_src, fdr_ptr);
++
++ sw64_elf_tdata (abfd)->find_line_info = fi;
++ }
++
++ if (_bfd_ecoff_locate_line (abfd, section, offset, &fi->d, swap,
++ &fi->i, filename_ptr, functionname_ptr,
++ line_ptr))
++ {
++ msec->flags = origflags;
++ return true;
++ }
++
++ msec->flags = origflags;
++ }
++
++ /* Fall back on the generic ELF find_nearest_line routine. */
++
++ return _bfd_elf_find_nearest_line (abfd, symbols, section, offset,
++ filename_ptr, functionname_ptr,
++ line_ptr, discriminator_ptr);
++}
++
++/* Structure used to pass information to sw64_elf_output_extsym. */
++
++struct extsym_info
++{
++ bfd *abfd;
++ struct bfd_link_info *info;
++ struct ecoff_debug_info *debug;
++ const struct ecoff_debug_swap *swap;
++ bool failed;
++};
++
++static bool
++elf64_sw64_output_extsym (struct elf_link_hash_entry *x, void * data)
++{
++ struct sw64_elf_link_hash_entry *h = (struct sw64_elf_link_hash_entry *) x;
++ struct extsym_info *einfo = (struct extsym_info *) data;
++ bool strip;
++ asection *sec, *output_section;
++
++ if (h->root.indx == -2)
++ strip = false;
++ else if ((h->root.def_dynamic
++ || h->root.ref_dynamic
++ || h->root.root.type == bfd_link_hash_new)
++ && !h->root.def_regular
++ && !h->root.ref_regular)
++ strip = true;
++ else if (einfo->info->strip == strip_all
++ || (einfo->info->strip == strip_some
++ && bfd_hash_lookup (einfo->info->keep_hash,
++ h->root.root.root.string,
++ false, false) == NULL))
++ strip = true;
++ else
++ strip = false;
++
++ if (strip)
++ return true;
++
++ if (h->esym.ifd == -2)
++ {
++ h->esym.jmptbl = 0;
++ h->esym.cobol_main = 0;
++ h->esym.weakext = 0;
++ h->esym.reserved = 0;
++ h->esym.ifd = ifdNil;
++ h->esym.asym.value = 0;
++ h->esym.asym.st = stGlobal;
++
++ if (h->root.root.type != bfd_link_hash_defined
++ && h->root.root.type != bfd_link_hash_defweak)
++ h->esym.asym.sc = scAbs;
++ else
++ {
++ const char *name;
++
++ sec = h->root.root.u.def.section;
++ output_section = sec->output_section;
++
++ /* When making a shared library and symbol h is the one from
++ another shared library, OUTPUT_SECTION may be null. */
++ if (output_section == NULL)
++ h->esym.asym.sc = scUndefined;
++ else
++ {
++ name = bfd_section_name (output_section);
++
++ if (strcmp (name, ".text") == 0)
++ h->esym.asym.sc = scText;
++ else if (strcmp (name, ".data") == 0)
++ h->esym.asym.sc = scData;
++ else if (strcmp (name, ".sdata") == 0)
++ h->esym.asym.sc = scSData;
++ else if (strcmp (name, ".rodata") == 0
++ || strcmp (name, ".rdata") == 0)
++ h->esym.asym.sc = scRData;
++ else if (strcmp (name, ".bss") == 0)
++ h->esym.asym.sc = scBss;
++ else if (strcmp (name, ".sbss") == 0)
++ h->esym.asym.sc = scSBss;
++ else if (strcmp (name, ".init") == 0)
++ h->esym.asym.sc = scInit;
++ else if (strcmp (name, ".fini") == 0)
++ h->esym.asym.sc = scFini;
++ else
++ h->esym.asym.sc = scAbs;
++ }
++ }
++
++ h->esym.asym.reserved = 0;
++ h->esym.asym.index = indexNil;
++ }
++
++ if (h->root.root.type == bfd_link_hash_common)
++ h->esym.asym.value = h->root.root.u.c.size;
++ else if (h->root.root.type == bfd_link_hash_defined
++ || h->root.root.type == bfd_link_hash_defweak)
++ {
++ if (h->esym.asym.sc == scCommon)
++ h->esym.asym.sc = scBss;
++ else if (h->esym.asym.sc == scSCommon)
++ h->esym.asym.sc = scSBss;
++
++ sec = h->root.root.u.def.section;
++ output_section = sec->output_section;
++ if (output_section != NULL)
++ h->esym.asym.value = (h->root.root.u.def.value
++ + sec->output_offset
++ + output_section->vma);
++ else
++ h->esym.asym.value = 0;
++ }
++
++ if (! bfd_ecoff_debug_one_external (einfo->abfd, einfo->debug, einfo->swap,
++ h->root.root.root.string,
++ &h->esym))
++ {
++ einfo->failed = true;
++ return false;
++ }
++
++ return true;
++}
++
++/* Search for and possibly create a got entry. */
++
++static struct sw64_elf_got_entry *
++get_got_entry (bfd *abfd, struct sw64_elf_link_hash_entry *h,
++ unsigned long r_type, unsigned long r_symndx,
++ bfd_vma r_addend)
++{
++ struct sw64_elf_got_entry *gotent;
++ struct sw64_elf_got_entry **slot;
++
++ if (h)
++ slot = &h->got_entries;
++ else
++ {
++ /* This is a local .got entry -- record for merge. */
++
++ struct sw64_elf_got_entry **local_got_entries;
++
++ local_got_entries = sw64_elf_tdata(abfd)->local_got_entries;
++ if (!local_got_entries)
++ {
++ bfd_size_type size;
++ Elf_Internal_Shdr *symtab_hdr;
++
++ symtab_hdr = &elf_tdata(abfd)->symtab_hdr;
++ size = symtab_hdr->sh_info;
++ size *= sizeof (struct sw64_elf_got_entry *);
++
++ local_got_entries
++ = (struct sw64_elf_got_entry **) bfd_zalloc (abfd, size);
++ if (!local_got_entries)
++ return NULL;
++
++ sw64_elf_tdata (abfd)->local_got_entries = local_got_entries;
++ }
++
++ slot = &local_got_entries[r_symndx];
++ }
++
++ for (gotent = *slot; gotent ; gotent = gotent->next)
++ if (gotent->gotobj == abfd
++ && gotent->reloc_type == r_type
++ && gotent->addend == r_addend)
++ break;
++
++ if (!gotent)
++ {
++ int entry_size;
++ size_t amt;
++
++ amt = sizeof (struct sw64_elf_got_entry);
++ gotent = (struct sw64_elf_got_entry *) bfd_alloc (abfd, amt);
++ if (!gotent)
++ return NULL;
++
++ gotent->gotobj = abfd;
++ gotent->addend = r_addend;
++ gotent->got_offset = -1;
++ gotent->plt_offset = -1;
++ gotent->use_count = 1;
++ gotent->reloc_type = r_type;
++ gotent->reloc_done = 0;
++ gotent->reloc_xlated = 0;
++
++ gotent->next = *slot;
++ *slot = gotent;
++
++ entry_size = sw64_got_entry_size (r_type);
++ sw64_elf_tdata (abfd)->total_got_size += entry_size;
++ if (!h)
++ sw64_elf_tdata(abfd)->local_got_size += entry_size;
++ }
++ else
++ gotent->use_count += 1;
++
++ return gotent;
++}
++
++static bool
++elf64_sw64_want_plt (struct sw64_elf_link_hash_entry *ah)
++{
++ return ((ah->root.type == STT_FUNC
++ || ah->root.root.type == bfd_link_hash_undefweak
++ || ah->root.root.type == bfd_link_hash_undefined)
++ && (ah->flags & SW64_ELF_LINK_HASH_LU_PLT) != 0
++ && (ah->flags & ~SW64_ELF_LINK_HASH_LU_PLT) == 0);
++}
++
++/* Whether to sort relocs output by ld -r or ld --emit-relocs, by r_offset.
++ Don't do so for code sections. We want to keep ordering of LITERAL/LITUSE
++ as is. On the other hand, elf-eh-frame.c processing requires .eh_frame
++ relocs to be sorted. */
++
++static bool
++elf64_sw64_sort_relocs_p (asection *sec)
++{
++ return (sec->flags & SEC_CODE) == 0;
++}
++
++
++/* Handle dynamic relocations when doing an SW64 ELF link. */
++
++static bool
++elf64_sw64_check_relocs (bfd *abfd, struct bfd_link_info *info,
++ asection *sec, const Elf_Internal_Rela *relocs)
++{
++ bfd *dynobj;
++ asection *sreloc;
++ Elf_Internal_Shdr *symtab_hdr;
++ struct sw64_elf_link_hash_entry **sym_hashes;
++ const Elf_Internal_Rela *rel, *relend;
++
++ if (bfd_link_relocatable (info))
++ return true;
++
++ BFD_ASSERT (is_sw64_elf (abfd));
++
++ dynobj = elf_hash_table (info)->dynobj;
++ if (dynobj == NULL)
++ elf_hash_table (info)->dynobj = dynobj = abfd;
++
++ sreloc = NULL;
++ symtab_hdr = &elf_symtab_hdr (abfd);
++ sym_hashes = sw64_elf_sym_hashes (abfd);
++
++ relend = relocs + sec->reloc_count;
++ for (rel = relocs; rel < relend; ++rel)
++ {
++ enum {
++ NEED_GOT = 1,
++ NEED_GOT_ENTRY = 2,
++ NEED_DYNREL = 4
++ };
++
++ unsigned long r_symndx, r_type;
++ struct sw64_elf_link_hash_entry *h;
++ unsigned int gotent_flags;
++ bool maybe_dynamic;
++ unsigned int need;
++ bfd_vma addend;
++
++ r_symndx = ELF64_R_SYM (rel->r_info);
++ if (r_symndx < symtab_hdr->sh_info)
++ h = NULL;
++ else
++ {
++ h = sym_hashes[r_symndx - symtab_hdr->sh_info];
++
++ while (h->root.root.type == bfd_link_hash_indirect
++ || h->root.root.type == bfd_link_hash_warning)
++ h = (struct sw64_elf_link_hash_entry *)h->root.root.u.i.link;
++
++ /* PR15323, ref flags aren't set for references in the same
++ object. */
++ h->root.ref_regular = 1;
++ }
++
++ /* We can only get preliminary data on whether a symbol is
++ locally or externally defined, as not all of the input files
++ have yet been processed. Do something with what we know, as
++ this may help reduce memory usage and processing time later. */
++ maybe_dynamic = false;
++ if (h && ((bfd_link_pic (info)
++ && (!info->symbolic
++ || info->unresolved_syms_in_shared_libs == RM_IGNORE))
++ || !h->root.def_regular
++ || h->root.root.type == bfd_link_hash_defweak))
++ maybe_dynamic = true;
++
++ need = 0;
++ gotent_flags = 0;
++ r_type = ELF64_R_TYPE (rel->r_info);
++ addend = rel->r_addend;
++
++ switch (r_type)
++ {
++ case R_SW64_LITERAL:
++ need = NEED_GOT | NEED_GOT_ENTRY;
++
++ /* Remember how this literal is used from its LITUSEs.
++ This will be important when it comes to decide if we can
++ create a .plt entry for a function symbol. */
++ while (++rel < relend && ELF64_R_TYPE (rel->r_info) == R_SW64_LITUSE)
++ if (rel->r_addend >= 1 && rel->r_addend <= 6)
++ gotent_flags |= 1 << rel->r_addend;
++ --rel;
++
++ /* No LITUSEs -- presumably the address is used somehow. */
++ if (gotent_flags == 0)
++ gotent_flags = SW64_ELF_LINK_HASH_LU_ADDR;
++ break;
++
++ case R_SW64_GPDISP:
++ case R_SW64_GPREL16:
++ case R_SW64_GPREL32:
++ case R_SW64_GPRELHIGH:
++ case R_SW64_GPRELLOW:
++ case R_SW64_BRSGP:
++ need = NEED_GOT;
++ break;
++
++ case R_SW64_REFLONG:
++ case R_SW64_REFQUAD:
++ if (bfd_link_pic (info) || maybe_dynamic)
++ need = NEED_DYNREL;
++ break;
++
++ case R_SW64_TLSLDM:
++ /* The symbol for a TLSLDM reloc is ignored. Collapse the
++ reloc to the STN_UNDEF (0) symbol so that they all match. */
++ r_symndx = STN_UNDEF;
++ h = 0;
++ maybe_dynamic = false;
++ /* FALLTHRU */
++
++ case R_SW64_TLSGD:
++ case R_SW64_GOTDTPREL:
++ need = NEED_GOT | NEED_GOT_ENTRY;
++ break;
++
++ case R_SW64_GOTTPREL:
++ need = NEED_GOT | NEED_GOT_ENTRY;
++ gotent_flags = SW64_ELF_LINK_HASH_TLS_IE;
++ if (bfd_link_pic (info))
++ info->flags |= DF_STATIC_TLS;
++ break;
++
++ case R_SW64_TPREL64:
++ if (bfd_link_dll (info))
++ {
++ info->flags |= DF_STATIC_TLS;
++ need = NEED_DYNREL;
++ }
++ else if (maybe_dynamic)
++ need = NEED_DYNREL;
++ break;
++ }
++
++ if (need & NEED_GOT)
++ {
++ if (sw64_elf_tdata(abfd)->gotobj == NULL)
++ {
++ if (!elf64_sw64_create_got_section (abfd, info))
++ return false;
++ }
++ }
++
++ if (need & NEED_GOT_ENTRY)
++ {
++ struct sw64_elf_got_entry *gotent;
++
++ gotent = get_got_entry (abfd, h, r_type, r_symndx, addend);
++ if (!gotent)
++ return false;
++
++ if (gotent_flags)
++ {
++ gotent->flags |= gotent_flags;
++ if (h)
++ {
++ gotent_flags |= h->flags;
++ h->flags = gotent_flags;
++
++ /* Make a guess as to whether a .plt entry is needed. */
++ /* ??? It appears that we won't make it into
++ adjust_dynamic_symbol for symbols that remain
++ totally undefined. Copying this check here means
++ we can create a plt entry for them too. */
++ h->root.needs_plt
++ = (maybe_dynamic && elf64_sw64_want_plt (h));
++ }
++ }
++ }
++
++ if (need & NEED_DYNREL)
++ {
++ /* We need to create the section here now whether we eventually
++ use it or not so that it gets mapped to an output section by
++ the linker. If not used, we'll kill it in size_dynamic_sections. */
++ if (sreloc == NULL)
++ {
++ sreloc = _bfd_elf_make_dynamic_reloc_section
++ (sec, dynobj, 3, abfd, /*rela?*/ true);
++
++ if (sreloc == NULL)
++ return false;
++ }
++
++ if (h)
++ {
++ /* Since we haven't seen all of the input symbols yet, we
++ don't know whether we'll actually need a dynamic relocation
++ entry for this reloc. So make a record of it. Once we
++ find out if this thing needs dynamic relocation we'll
++ expand the relocation sections by the appropriate amount. */
++
++ struct sw64_elf_reloc_entry *rent;
++
++ for (rent = h->reloc_entries; rent; rent = rent->next)
++ if (rent->rtype == r_type && rent->srel == sreloc)
++ break;
++
++ if (!rent)
++ {
++ size_t amt = sizeof (struct sw64_elf_reloc_entry);
++ rent = (struct sw64_elf_reloc_entry *) bfd_alloc (abfd, amt);
++ if (!rent)
++ return false;
++
++ rent->srel = sreloc;
++ rent->sec = sec;
++ rent->rtype = r_type;
++ rent->count = 1;
++
++ rent->next = h->reloc_entries;
++ h->reloc_entries = rent;
++ }
++ else
++ rent->count++;
++ }
++ else if (bfd_link_pic (info))
++ {
++ /* If this is a shared library, and the section is to be
++ loaded into memory, we need a RELATIVE reloc. */
++ sreloc->size += sizeof (Elf64_External_Rela);
++ if (sec->flags & SEC_READONLY)
++ {
++ info->flags |= DF_TEXTREL;
++ info->callbacks->minfo
++ (_("%pB: dynamic relocation against a local symbol in "
++ "read-only section `%pA'\n"),
++ sec->owner, sec);
++ }
++ }
++ }
++ }
++
++ return true;
++}
++
++/* Return the section that should be marked against GC for a given
++ relocation. */
++
++static asection *
++elf64_sw64_gc_mark_hook (asection *sec, struct bfd_link_info *info,
++ Elf_Internal_Rela *rel,
++ struct elf_link_hash_entry *h, Elf_Internal_Sym *sym)
++{
++ /* These relocations don't really reference a symbol. Instead we store
++ extra data in their addend slot. Ignore the symbol. */
++ switch (ELF64_R_TYPE (rel->r_info))
++ {
++ case R_SW64_LITUSE:
++ case R_SW64_GPDISP:
++ case R_SW64_HINT:
++ return NULL;
++ }
++
++ return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
++}
++
++/* Adjust a symbol defined by a dynamic object and referenced by a
++ regular object. The current definition is in some section of the
++ dynamic object, but we're not including those sections. We have to
++ change the definition to something the rest of the link can
++ understand. */
++
++static bool
++elf64_sw64_adjust_dynamic_symbol (struct bfd_link_info *info,
++ struct elf_link_hash_entry *h)
++{
++ bfd *dynobj;
++ asection *s;
++ struct sw64_elf_link_hash_entry *ah;
++
++ dynobj = elf_hash_table(info)->dynobj;
++ ah = (struct sw64_elf_link_hash_entry *)h;
++
++ /* Now that we've seen all of the input symbols, finalize our decision
++ about whether this symbol should get a .plt entry. Irritatingly, it
++ is common for folk to leave undefined symbols in shared libraries,
++ and they still expect lazy binding; accept undefined symbols in lieu
++ of STT_FUNC. */
++ if (sw64_elf_dynamic_symbol_p (h, info) && elf64_sw64_want_plt (ah))
++ {
++ h->needs_plt = true;
++
++ s = elf_hash_table(info)->splt;
++ if (!s && !elf64_sw64_create_dynamic_sections (dynobj, info))
++ return false;
++
++ /* We need one plt entry per got subsection. Delay allocation of
++ the actual plt entries until size_plt_section, called from
++ size_dynamic_sections or during relaxation. */
++
++ return true;
++ }
++ else
++ h->needs_plt = false;
++
++ /* If this is a weak symbol, and there is a real definition, the
++ processor independent code will have arranged for us to see the
++ real definition first, and we can just use the same value. */
++ if (h->is_weakalias)
++ {
++ struct elf_link_hash_entry *def = weakdef (h);
++ BFD_ASSERT (def->root.type == bfd_link_hash_defined);
++ h->root.u.def.section = def->root.u.def.section;
++ h->root.u.def.value = def->root.u.def.value;
++ return true;
++ }
++
++ /* This is a reference to a symbol defined by a dynamic object which
++ is not a function. The SW64, since it uses .got entries for all
++ symbols even in regular objects, does not need the hackery of a
++ .dynbss section and COPY dynamic relocations. */
++
++ return true;
++}
++
++/* Record STO_SW64_NOPV and STO_SW64_STD_GPLOAD. */
++
++static void
++elf64_sw64_merge_symbol_attribute (struct elf_link_hash_entry *h,
++ unsigned int st_other,
++ bool definition,
++ bool dynamic)
++{
++ if (!dynamic && definition)
++ h->other = ((h->other & ELF_ST_VISIBILITY (-1))
++ | (st_other & ~ELF_ST_VISIBILITY (-1)));
++}
++
++/* Symbol versioning can create new symbols, and make our old symbols
++ indirect to the new ones. Consolidate the got and reloc information
++ in these situations. */
++
++static void
++elf64_sw64_copy_indirect_symbol (struct bfd_link_info *info,
++ struct elf_link_hash_entry *dir,
++ struct elf_link_hash_entry *ind)
++{
++ struct sw64_elf_link_hash_entry *hi
++ = (struct sw64_elf_link_hash_entry *) ind;
++ struct sw64_elf_link_hash_entry *hs
++ = (struct sw64_elf_link_hash_entry *) dir;
++
++ /* Do the merging in the superclass. */
++ _bfd_elf_link_hash_copy_indirect(info, dir, ind);
++
++ /* Merge the flags. Whee. */
++ hs->flags |= hi->flags;
++
++ /* ??? It's unclear to me what's really supposed to happen when
++ "merging" defweak and defined symbols, given that we don't
++ actually throw away the defweak. This more-or-less copies
++ the logic related to got and plt entries in the superclass. */
++ if (ind->root.type != bfd_link_hash_indirect)
++ return;
++
++ /* Merge the .got entries. Cannibalize the old symbol's list in
++ doing so, since we don't need it anymore. */
++
++ if (hs->got_entries == NULL)
++ hs->got_entries = hi->got_entries;
++ else
++ {
++ struct sw64_elf_got_entry *gi, *gs, *gin, *gsh;
++
++ gsh = hs->got_entries;
++ for (gi = hi->got_entries; gi ; gi = gin)
++ {
++ gin = gi->next;
++ for (gs = gsh; gs ; gs = gs->next)
++ if (gi->gotobj == gs->gotobj
++ && gi->reloc_type == gs->reloc_type
++ && gi->addend == gs->addend)
++ {
++ gs->use_count += gi->use_count;
++ goto got_found;
++ }
++ gi->next = hs->got_entries;
++ hs->got_entries = gi;
++ got_found:;
++ }
++ }
++ hi->got_entries = NULL;
++
++ /* And similar for the reloc entries. */
++
++ if (hs->reloc_entries == NULL)
++ hs->reloc_entries = hi->reloc_entries;
++ else
++ {
++ struct sw64_elf_reloc_entry *ri, *rs, *rin, *rsh;
++
++ rsh = hs->reloc_entries;
++ for (ri = hi->reloc_entries; ri ; ri = rin)
++ {
++ rin = ri->next;
++ for (rs = rsh; rs ; rs = rs->next)
++ if (ri->rtype == rs->rtype && ri->srel == rs->srel)
++ {
++ rs->count += ri->count;
++ goto found_reloc;
++ }
++ ri->next = hs->reloc_entries;
++ hs->reloc_entries = ri;
++ found_reloc:;
++ }
++ }
++ hi->reloc_entries = NULL;
++}
++
++/* Is it possible to merge two object file's .got tables? */
++
++static bool
++elf64_sw64_can_merge_gots (bfd *a, bfd *b)
++{
++ int total = sw64_elf_tdata (a)->total_got_size;
++ bfd *bsub;
++
++ /* Trivial quick fallout test. */
++ if (total + sw64_elf_tdata (b)->total_got_size <= MAX_GOT_SIZE)
++ return true;
++
++ /* By their nature, local .got entries cannot be merged. */
++ if ((total += sw64_elf_tdata (b)->local_got_size) > MAX_GOT_SIZE)
++ return false;
++
++ /* Failing the common trivial comparison, we must effectively
++ perform the merge. Not actually performing the merge means that
++ we don't have to store undo information in case we fail. */
++ for (bsub = b; bsub ; bsub = sw64_elf_tdata (bsub)->in_got_link_next)
++ {
++ struct sw64_elf_link_hash_entry **hashes = sw64_elf_sym_hashes (bsub);
++ Elf_Internal_Shdr *symtab_hdr = &elf_tdata (bsub)->symtab_hdr;
++ int i, n;
++
++ n = NUM_SHDR_ENTRIES (symtab_hdr) - symtab_hdr->sh_info;
++ for (i = 0; i < n; ++i)
++ {
++ struct sw64_elf_got_entry *ae, *be;
++ struct sw64_elf_link_hash_entry *h;
++
++ h = hashes[i];
++ while (h->root.root.type == bfd_link_hash_indirect
++ || h->root.root.type == bfd_link_hash_warning)
++ h = (struct sw64_elf_link_hash_entry *)h->root.root.u.i.link;
++
++ for (be = h->got_entries; be ; be = be->next)
++ {
++ if (be->use_count == 0)
++ continue;
++ if (be->gotobj != b)
++ continue;
++
++ for (ae = h->got_entries; ae ; ae = ae->next)
++ if (ae->gotobj == a
++ && ae->reloc_type == be->reloc_type
++ && ae->addend == be->addend)
++ goto global_found;
++
++ total += sw64_got_entry_size (be->reloc_type);
++ if (total > MAX_GOT_SIZE)
++ return false;
++ global_found:;
++ }
++ }
++ }
++
++ return true;
++}
++
++/* Actually merge two .got tables. */
++
++static void
++elf64_sw64_merge_gots (bfd *a, bfd *b)
++{
++ int total = sw64_elf_tdata (a)->total_got_size;
++ bfd *bsub;
++
++ /* Remember local expansion. */
++ {
++ int e = sw64_elf_tdata (b)->local_got_size;
++ total += e;
++ sw64_elf_tdata (a)->local_got_size += e;
++ }
++
++ for (bsub = b; bsub ; bsub = sw64_elf_tdata (bsub)->in_got_link_next)
++ {
++ struct sw64_elf_got_entry **local_got_entries;
++ struct sw64_elf_link_hash_entry **hashes;
++ Elf_Internal_Shdr *symtab_hdr;
++ int i, n;
++
++ /* Let the local .got entries know they are part of a new subsegment. */
++ local_got_entries = sw64_elf_tdata (bsub)->local_got_entries;
++ if (local_got_entries)
++ {
++ n = elf_tdata (bsub)->symtab_hdr.sh_info;
++ for (i = 0; i < n; ++i)
++ {
++ struct sw64_elf_got_entry *ent;
++ for (ent = local_got_entries[i]; ent; ent = ent->next)
++ ent->gotobj = a;
++ }
++ }
++
++ /* Merge the global .got entries. */
++ hashes = sw64_elf_sym_hashes (bsub);
++ symtab_hdr = &elf_tdata (bsub)->symtab_hdr;
++
++ n = NUM_SHDR_ENTRIES (symtab_hdr) - symtab_hdr->sh_info;
++ for (i = 0; i < n; ++i)
++ {
++ struct sw64_elf_got_entry *ae, *be, **pbe, **start;
++ struct sw64_elf_link_hash_entry *h;
++
++ h = hashes[i];
++ while (h->root.root.type == bfd_link_hash_indirect
++ || h->root.root.type == bfd_link_hash_warning)
++ h = (struct sw64_elf_link_hash_entry *)h->root.root.u.i.link;
++
++ pbe = start = &h->got_entries;
++ while ((be = *pbe) != NULL)
++ {
++ if (be->use_count == 0)
++ {
++ *pbe = be->next;
++ memset (be, 0xa5, sizeof (*be));
++ goto kill;
++ }
++ if (be->gotobj != b)
++ goto next;
++
++ for (ae = *start; ae ; ae = ae->next)
++ if (ae->gotobj == a
++ && ae->reloc_type == be->reloc_type
++ && ae->addend == be->addend)
++ {
++ ae->flags |= be->flags;
++ ae->use_count += be->use_count;
++ *pbe = be->next;
++ memset (be, 0xa5, sizeof (*be));
++ goto kill;
++ }
++ be->gotobj = a;
++ total += sw64_got_entry_size (be->reloc_type);
++
++ next:;
++ pbe = &be->next;
++ kill:;
++ }
++ }
++
++ sw64_elf_tdata (bsub)->gotobj = a;
++ }
++ sw64_elf_tdata (a)->total_got_size = total;
++
++ /* Merge the two in_got chains. */
++ {
++ bfd *next;
++
++ bsub = a;
++ while ((next = sw64_elf_tdata (bsub)->in_got_link_next) != NULL)
++ bsub = next;
++
++ sw64_elf_tdata (bsub)->in_got_link_next = b;
++ }
++}
++
++/* Calculate the offsets for the got entries. */
++
++static bool
++elf64_sw64_calc_got_offsets_for_symbol (struct sw64_elf_link_hash_entry *h,
++ void * arg ATTRIBUTE_UNUSED)
++{
++ struct sw64_elf_got_entry *gotent;
++
++ for (gotent = h->got_entries; gotent; gotent = gotent->next)
++ if (gotent->use_count > 0)
++ {
++ struct sw64_elf_obj_tdata *td;
++ bfd_size_type *plge;
++
++ td = sw64_elf_tdata (gotent->gotobj);
++ plge = &td->got->size;
++ gotent->got_offset = *plge;
++ *plge += sw64_got_entry_size (gotent->reloc_type);
++ }
++
++ return true;
++}
++
++static void
++elf64_sw64_calc_got_offsets (struct bfd_link_info *info)
++{
++ bfd *i, *got_list;
++ struct sw64_elf_link_hash_table * htab;
++
++ htab = sw64_elf_hash_table (info);
++ if (htab == NULL)
++ return;
++ got_list = htab->got_list;
++
++ /* First, zero out the .got sizes, as we may be recalculating the
++ .got after optimizing it. */
++ for (i = got_list; i ; i = sw64_elf_tdata(i)->got_link_next)
++ sw64_elf_tdata(i)->got->size = 0;
++
++ /* Next, fill in the offsets for all the global entries. */
++ sw64_elf_link_hash_traverse (htab,
++ elf64_sw64_calc_got_offsets_for_symbol,
++ NULL);
++
++ /* Finally, fill in the offsets for the local entries. */
++ for (i = got_list; i ; i = sw64_elf_tdata(i)->got_link_next)
++ {
++ bfd_size_type got_offset = sw64_elf_tdata(i)->got->size;
++ bfd *j;
++
++ for (j = i; j ; j = sw64_elf_tdata(j)->in_got_link_next)
++ {
++ struct sw64_elf_got_entry **local_got_entries, *gotent;
++ int k, n;
++
++ local_got_entries = sw64_elf_tdata(j)->local_got_entries;
++ if (!local_got_entries)
++ continue;
++
++ for (k = 0, n = elf_tdata(j)->symtab_hdr.sh_info; k < n; ++k)
++ for (gotent = local_got_entries[k]; gotent; gotent = gotent->next)
++ if (gotent->use_count > 0)
++ {
++ gotent->got_offset = got_offset;
++ got_offset += sw64_got_entry_size (gotent->reloc_type);
++ }
++ }
++
++ sw64_elf_tdata(i)->got->size = got_offset;
++ }
++}
++
++/* Constructs the gots. */
++
++static bool
++elf64_sw64_size_got_sections (struct bfd_link_info *info,
++ bool may_merge)
++{
++ bfd *i, *got_list, *cur_got_obj = NULL;
++ struct sw64_elf_link_hash_table * htab;
++
++ htab = sw64_elf_hash_table (info);
++ if (htab == NULL)
++ return false;
++ got_list = htab->got_list;
++
++ /* On the first time through, pretend we have an existing got list
++ consisting of all of the input files. */
++ if (got_list == NULL)
++ {
++ for (i = info->input_bfds; i ; i = i->link.next)
++ {
++ bfd *this_got;
++
++ if (! is_sw64_elf (i))
++ continue;
++
++ this_got = sw64_elf_tdata (i)->gotobj;
++ if (this_got == NULL)
++ continue;
++
++ /* We are assuming no merging has yet occurred. */
++ BFD_ASSERT (this_got == i);
++
++ if (sw64_elf_tdata (this_got)->total_got_size > MAX_GOT_SIZE)
++ {
++ /* Yikes! A single object file has too many entries. */
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: .got subsegment exceeds 64K (size %d)"),
++ i, sw64_elf_tdata (this_got)->total_got_size);
++ return false;
++ }
++
++ if (got_list == NULL)
++ got_list = this_got;
++ else
++ sw64_elf_tdata(cur_got_obj)->got_link_next = this_got;
++ cur_got_obj = this_got;
++ }
++
++ /* Strange degenerate case of no got references. */
++ if (got_list == NULL)
++ return true;
++
++ htab->got_list = got_list;
++ }
++
++ cur_got_obj = got_list;
++ if (cur_got_obj == NULL)
++ return false;
++
++ if (may_merge)
++ {
++ i = sw64_elf_tdata(cur_got_obj)->got_link_next;
++ while (i != NULL)
++ {
++ if (elf64_sw64_can_merge_gots (cur_got_obj, i))
++ {
++ elf64_sw64_merge_gots (cur_got_obj, i);
++
++ sw64_elf_tdata(i)->got->size = 0;
++ i = sw64_elf_tdata(i)->got_link_next;
++ sw64_elf_tdata(cur_got_obj)->got_link_next = i;
++ }
++ else
++ {
++ cur_got_obj = i;
++ i = sw64_elf_tdata(i)->got_link_next;
++ }
++ }
++ }
++
++ /* Once the gots have been merged, fill in the got offsets for
++ everything therein. */
++ elf64_sw64_calc_got_offsets (info);
++
++ return true;
++}
++
++static bool
++elf64_sw64_size_plt_section_1 (struct sw64_elf_link_hash_entry *h,
++ void * data)
++{
++ asection *splt = (asection *) data;
++ struct sw64_elf_got_entry *gotent;
++ bool saw_one = false;
++
++ /* If we didn't need an entry before, we still don't. */
++ if (!h->root.needs_plt)
++ return true;
++
++ /* For each LITERAL got entry still in use, allocate a plt entry. */
++ for (gotent = h->got_entries; gotent ; gotent = gotent->next)
++ if (gotent->reloc_type == R_SW64_LITERAL
++ && gotent->use_count > 0)
++ {
++ if (splt->size == 0)
++ splt->size = PLT_HEADER_SIZE;
++ gotent->plt_offset = splt->size;
++ splt->size += PLT_ENTRY_SIZE;
++ saw_one = true;
++ }
++
++ /* If there weren't any, there's no longer a need for the PLT entry. */
++ if (!saw_one)
++ h->root.needs_plt = false;
++
++ return true;
++}
++
++/* Called from relax_section to rebuild the PLT in light of potential changes
++ in the function's status. */
++
++static void
++elf64_sw64_size_plt_section (struct bfd_link_info *info)
++{
++ asection *splt, *spltrel, *sgotplt;
++ unsigned long entries;
++ struct sw64_elf_link_hash_table * htab;
++
++ htab = sw64_elf_hash_table (info);
++ if (htab == NULL)
++ return;
++
++ splt = elf_hash_table(info)->splt;
++ if (splt == NULL)
++ return;
++
++ splt->size = 0;
++
++ sw64_elf_link_hash_traverse (htab,
++ elf64_sw64_size_plt_section_1, splt);
++
++ /* Every plt entry requires a JMP_SLOT relocation. */
++ spltrel = elf_hash_table(info)->srelplt;
++ entries = 0;
++ if (splt->size)
++ {
++ if (elf64_sw64_use_secureplt)
++ entries = (splt->size - NEW_PLT_HEADER_SIZE) / NEW_PLT_ENTRY_SIZE;
++ else
++ entries = (splt->size - OLD_PLT_HEADER_SIZE) / OLD_PLT_ENTRY_SIZE;
++ }
++ spltrel->size = entries * sizeof (Elf64_External_Rela);
++
++ /* When using the secureplt, we need two words somewhere in the data
++ segment for the dynamic linker to tell us where to go. This is the
++ entire contents of the .got.plt section. */
++ if (elf64_sw64_use_secureplt)
++ {
++ sgotplt = elf_hash_table(info)->sgotplt;
++ sgotplt->size = entries ? 16 : 0;
++ }
++}
++
++static bool
++elf64_sw64_always_size_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
++ struct bfd_link_info *info)
++{
++ bfd *i;
++ struct sw64_elf_link_hash_table * htab;
++
++ if (bfd_link_relocatable (info))
++ return true;
++
++ htab = sw64_elf_hash_table (info);
++ if (htab == NULL)
++ return false;
++
++ if (!elf64_sw64_size_got_sections (info, true))
++ return false;
++
++ /* Allocate space for all of the .got subsections. */
++ i = htab->got_list;
++ for ( ; i ; i = sw64_elf_tdata(i)->got_link_next)
++ {
++ asection *s = sw64_elf_tdata(i)->got;
++ if (s->size > 0)
++ {
++ s->contents = (bfd_byte *) bfd_zalloc (i, s->size);
++ if (s->contents == NULL)
++ return false;
++ }
++ }
++
++ return true;
++}
++
++/* The number of dynamic relocations required by a static relocation. */
++
++static int
++sw64_dynamic_entries_for_reloc (int r_type, int dynamic, int shared, int pie)
++{
++ switch (r_type)
++ {
++ /* May appear in GOT entries. */
++ case R_SW64_TLSGD:
++ return (dynamic ? 2 : shared ? 1 : 0);
++ case R_SW64_TLSLDM:
++ return shared;
++ case R_SW64_LITERAL:
++ return dynamic || shared;
++ case R_SW64_GOTTPREL:
++ return dynamic || (shared && !pie);
++ case R_SW64_GOTDTPREL:
++ return dynamic;
++
++ /* May appear in data sections. */
++ case R_SW64_REFLONG:
++ case R_SW64_REFQUAD:
++ return dynamic || shared;
++ case R_SW64_TPREL64:
++ return dynamic || (shared && !pie);
++
++ /* Everything else is illegal. We'll issue an error during
++ relocate_section. */
++ default:
++ return 0;
++ }
++}
++
++/* Work out the sizes of the dynamic relocation entries. */
++
++static bool
++elf64_sw64_calc_dynrel_sizes (struct sw64_elf_link_hash_entry *h,
++ struct bfd_link_info *info)
++{
++ bool dynamic;
++ struct sw64_elf_reloc_entry *relent;
++ unsigned long entries;
++
++ /* If the symbol was defined as a common symbol in a regular object
++ file, and there was no definition in any dynamic object, then the
++ linker will have allocated space for the symbol in a common
++ section but the ELF_LINK_HASH_DEF_REGULAR flag will not have been
++ set. This is done for dynamic symbols in
++ elf_adjust_dynamic_symbol but this is not done for non-dynamic
++ symbols, somehow. */
++ if (!h->root.def_regular
++ && h->root.ref_regular
++ && !h->root.def_dynamic
++ && (h->root.root.type == bfd_link_hash_defined
++ || h->root.root.type == bfd_link_hash_defweak)
++ && !(h->root.root.u.def.section->owner->flags & DYNAMIC))
++ h->root.def_regular = 1;
++
++ /* If the symbol is dynamic, we'll need all the relocations in their
++ natural form. If this is a shared object, and it has been forced
++ local, we'll need the same number of RELATIVE relocations. */
++ dynamic = sw64_elf_dynamic_symbol_p (&h->root, info);
++
++ /* If the symbol is a hidden undefined weak, then we never have any
++ relocations. Avoid the loop which may want to add RELATIVE relocs
++ based on bfd_link_pic (info). */
++ if (h->root.root.type == bfd_link_hash_undefweak && !dynamic)
++ return true;
++
++ for (relent = h->reloc_entries; relent; relent = relent->next)
++ {
++ entries = sw64_dynamic_entries_for_reloc (relent->rtype, dynamic,
++ bfd_link_pic (info),
++ bfd_link_pie (info));
++ if (entries)
++ {
++ asection *sec = relent->sec;
++ relent->srel->size +=
++ entries * sizeof (Elf64_External_Rela) * relent->count;
++ if ((sec->flags & SEC_READONLY) != 0)
++ {
++ info->flags |= DF_TEXTREL;
++ info->callbacks->minfo
++ (_("%pB: dynamic relocation against `%pT' in "
++ "read-only section `%pA'\n"),
++ sec->owner, h->root.root.root.string, sec);
++ }
++ }
++ }
++
++ return true;
++}
++
++/* Subroutine of elf64_sw64_size_rela_got_section for doing the
++ global symbols. Invoked via sw64_elf_link_hash_traverse; adds the
++ space needed by H's live GOT entries to .rela.got. Always returns
++ true so that the hash traversal visits every symbol. */
++
++static bool
++elf64_sw64_size_rela_got_1 (struct sw64_elf_link_hash_entry *h,
++ struct bfd_link_info *info)
++{
++ bool dynamic;
++ struct sw64_elf_got_entry *gotent;
++ unsigned long entries;
++
++ /* If we're using a plt for this symbol, then all of its relocations
++ for its got entries go into .rela.plt. */
++ if (h->root.needs_plt)
++ return true;
++
++ /* If the symbol is dynamic, we'll need all the relocations in their
++ natural form. If this is a shared object, and it has been forced
++ local, we'll need the same number of RELATIVE relocations. */
++ dynamic = sw64_elf_dynamic_symbol_p (&h->root, info);
++
++ /* If the symbol is a hidden undefined weak, then we never have any
++ relocations. Avoid the loop which may want to add RELATIVE relocs
++ based on bfd_link_pic (info). */
++ if (h->root.root.type == bfd_link_hash_undefweak && !dynamic)
++ return true;
++
++ /* Count dynamic relocs only for GOT entries still in use after
++ relaxation (use_count > 0); the per-type count depends on the
++ reloc type and on whether we're producing PIC/PIE output. */
++ entries = 0;
++ for (gotent = h->got_entries; gotent ; gotent = gotent->next)
++ if (gotent->use_count > 0)
++ entries += sw64_dynamic_entries_for_reloc (gotent->reloc_type, dynamic,
++ bfd_link_pic (info),
++ bfd_link_pie (info));
++
++ /* Grow (note: +=, the local-symbol pass already set the base size)
++ the single .rela.got section shared by all input bfds. */
++ if (entries > 0)
++ {
++ asection *srel = elf_hash_table(info)->srelgot;
++ BFD_ASSERT (srel != NULL);
++ srel->size += sizeof (Elf64_External_Rela) * entries;
++ }
++
++ return true;
++}
++
++/* Set the sizes of the dynamic relocation sections. Computes the
++ .rela.got size from scratch: first the relocs needed by local
++ symbols' GOT entries (counted here), then the global symbols
++ (added by elf64_sw64_size_rela_got_1 via the hash traversal).
++ Safe to call repeatedly during relaxation since the size is
++ assigned, not accumulated, before the traversal. */
++
++static void
++elf64_sw64_size_rela_got_section (struct bfd_link_info *info)
++{
++ unsigned long entries;
++ bfd *i;
++ asection *srel;
++ struct sw64_elf_link_hash_table * htab;
++
++ htab = sw64_elf_hash_table (info);
++ if (htab == NULL)
++ return;
++
++ /* Shared libraries often require RELATIVE relocs, and some relocs
++ require attention for the main application as well. */
++
++ /* Walk every GOT subsection (got_link_next) and every input bfd
++ sharing it (in_got_link_next), counting relocs for local GOT
++ entries. The `dynamic' argument is 0 here: local symbols are
++ never dynamic. */
++ entries = 0;
++ for (i = htab->got_list;
++ i ; i = sw64_elf_tdata(i)->got_link_next)
++ {
++ bfd *j;
++
++ for (j = i; j ; j = sw64_elf_tdata(j)->in_got_link_next)
++ {
++ struct sw64_elf_got_entry **local_got_entries, *gotent;
++ int k, n;
++
++ local_got_entries = sw64_elf_tdata(j)->local_got_entries;
++ if (!local_got_entries)
++ continue;
++
++ /* sh_info of the symtab header is the number of local symbols. */
++ for (k = 0, n = elf_tdata(j)->symtab_hdr.sh_info; k < n; ++k)
++ for (gotent = local_got_entries[k];
++ gotent ; gotent = gotent->next)
++ if (gotent->use_count > 0)
++ entries += (sw64_dynamic_entries_for_reloc
++ (gotent->reloc_type, 0, bfd_link_pic (info),
++ bfd_link_pie (info)));
++ }
++ }
++
++ srel = elf_hash_table(info)->srelgot;
++ if (!srel)
++ {
++ BFD_ASSERT (entries == 0);
++ return;
++ }
++ /* Plain assignment: resets any size from a previous relax trip. */
++ srel->size = sizeof (Elf64_External_Rela) * entries;
++
++ /* Now do the non-local symbols. */
++ sw64_elf_link_hash_traverse (htab,
++ elf64_sw64_size_rela_got_1, info);
++}
++
++/* Set the sizes of the dynamic sections. ELF backend hook
++ (elf_backend_size_dynamic_sections): sizes .interp, the dynamic
++ reloc sections, the PLT, and allocates zeroed contents for the
++ linker-created sections in DYNOBJ, then adds the .dynamic tags.
++ NOTE(review): output_bfd is tagged ATTRIBUTE_UNUSED but is in fact
++ passed to _bfd_elf_add_dynamic_tags below — harmless, the attribute
++ only permits unuse. */
++
++static bool
++elf64_sw64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
++ struct bfd_link_info *info)
++{
++ bfd *dynobj;
++ asection *s;
++ bool relplt, relocs;
++ struct sw64_elf_link_hash_table * htab;
++
++ htab = sw64_elf_hash_table (info);
++ if (htab == NULL)
++ return false;
++
++ dynobj = elf_hash_table(info)->dynobj;
++ BFD_ASSERT(dynobj != NULL);
++
++ if (elf_hash_table (info)->dynamic_sections_created)
++ {
++ /* Set the contents of the .interp section to the interpreter. */
++ if (bfd_link_executable (info) && !info->nointerp)
++ {
++ s = bfd_get_linker_section (dynobj, ".interp");
++ BFD_ASSERT (s != NULL);
++ s->size = sizeof ELF_DYNAMIC_INTERPRETER;
++ /* Points directly at the static string constant; the section
++ contents are never freed, so this is safe. */
++ s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
++ }
++
++ /* Now that we've seen all of the input files, we can decide which
++ symbols need dynamic relocation entries and which don't. We've
++ collected information in check_relocs that we can now apply to
++ size the dynamic relocation sections. */
++ sw64_elf_link_hash_traverse (htab,
++ elf64_sw64_calc_dynrel_sizes, info);
++
++ elf64_sw64_size_rela_got_section (info);
++ elf64_sw64_size_plt_section (info);
++ }
++ /* else we're not dynamic and by definition we don't need such things. */
++
++ /* The check_relocs and adjust_dynamic_symbol entry points have
++ determined the sizes of the various dynamic sections. Allocate
++ memory for them. */
++ relplt = false; /* any non-empty .rela.plt seen */
++ relocs = false; /* any other non-empty .rela.* seen */
++ for (s = dynobj->sections; s != NULL; s = s->next)
++ {
++ const char *name;
++
++ if (!(s->flags & SEC_LINKER_CREATED))
++ continue;
++
++ /* It's OK to base decisions on the section name, because none
++ of the dynobj section names depend upon the input files. */
++ name = bfd_section_name (s);
++
++ if (startswith (name, ".rela"))
++ {
++ if (s->size != 0)
++ {
++ if (strcmp (name, ".rela.plt") == 0)
++ relplt = true;
++ else
++ relocs = true;
++
++ /* We use the reloc_count field as a counter if we need
++ to copy relocs into the output file. */
++ s->reloc_count = 0;
++ }
++ }
++ else if (! startswith (name, ".got")
++ && strcmp (name, ".plt") != 0
++ && strcmp (name, ".dynbss") != 0)
++ {
++ /* It's not one of our dynamic sections, so don't allocate space. */
++ continue;
++ }
++
++ if (s->size == 0)
++ {
++ /* If we don't need this section, strip it from the output file.
++ This is to handle .rela.bss and .rela.plt. We must create it
++ in create_dynamic_sections, because it must be created before
++ the linker maps input sections to output sections. The
++ linker does that before adjust_dynamic_symbol is called, and
++ it is that function which decides whether anything needs to
++ go into these sections. */
++ if (!startswith (name, ".got"))
++ s->flags |= SEC_EXCLUDE;
++ }
++ else if ((s->flags & SEC_HAS_CONTENTS) != 0)
++ {
++ /* Allocate memory for the section contents. bfd_zalloc gives
++ zero-filled storage owned by DYNOBJ (freed with the bfd). */
++ s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
++ if (s->contents == NULL)
++ return false;
++ }
++ }
++
++ if (elf_hash_table (info)->dynamic_sections_created)
++ {
++ /* Add some entries to the .dynamic section. We fill in the
++ values later, in elf64_sw64_finish_dynamic_sections, but we
++ must add the entries now so that we get the correct size for
++ the .dynamic section. The DT_DEBUG entry is filled in by the
++ dynamic linker and used by the debugger. */
++#define add_dynamic_entry(TAG, VAL) \
++ _bfd_elf_add_dynamic_entry (info, TAG, VAL)
++
++ if (!_bfd_elf_add_dynamic_tags (output_bfd, info,
++ relocs || relplt))
++ return false;
++
++ /* Advertise a read-only PLT to the dynamic linker when the
++ secureplt layout was selected. */
++ if (relplt
++ && elf64_sw64_use_secureplt
++ && !add_dynamic_entry (DT_SW64_PLTRO, 1))
++ return false;
++ }
++#undef add_dynamic_entry
++
++ return true;
++}
++
++/* These functions do relaxation for SW64 ELF.
++
++ Currently I'm only handling what I can do with existing compiler
++ and assembler support, which means no instructions are removed,
++ though some may be nopped. At this time GCC does not emit enough
++ information to do all of the relaxing that is possible. It will
++ take some not small amount of work for that to happen.
++
++ There are a couple of interesting papers that I once read on this
++ subject, that I cannot find references to at the moment, that
++ related to SW64 in particular. They are by David Wall, then of
++ DEC WRL. */
++
++/* Bookkeeping shared by the relaxation subroutines below. One
++ instance is filled in by elf64_sw64_relax_section and threaded
++ through the per-reloc helpers. */
++struct sw64_relax_info
++{
++ bfd *abfd; /* Input bfd being relaxed. */
++ asection *sec; /* Section within ABFD being relaxed. */
++ bfd_byte *contents; /* SEC's contents. */
++ Elf_Internal_Shdr *symtab_hdr; /* ABFD's symbol table header. */
++ Elf_Internal_Rela *relocs, *relend; /* SEC's relocs [relocs, relend). */
++ struct bfd_link_info *link_info;
++ bfd_vma gp; /* GP value for this object's GOT. */
++ bfd *gotobj; /* The bfd owning the GOT used by ABFD. */
++ asection *tsec; /* Section of the current target symbol. */
++ struct sw64_elf_link_hash_entry *h; /* Target hash entry; NULL if local. */
++ struct sw64_elf_got_entry **first_gotent; /* Head of target's GOT entry list. */
++ struct sw64_elf_got_entry *gotent; /* GOT entry of the reloc in hand. */
++ bool changed_contents; /* Section bytes were rewritten. */
++ bool changed_relocs; /* Relocations were rewritten. */
++ unsigned char other; /* Target symbol's st_other bits. */
++};
++
++/* Linear search of [REL, RELEND) for a relocation of exactly TYPE at
++ OFFSET. Returns the matching reloc, or NULL if there is none. */
++
++static Elf_Internal_Rela *
++elf64_sw64_find_reloc_at_ofs (Elf_Internal_Rela *rel,
++ Elf_Internal_Rela *relend,
++ bfd_vma offset, int type)
++{
++ while (rel < relend)
++ {
++ if (rel->r_offset == offset
++ && ELF64_R_TYPE (rel->r_info) == (unsigned int) type)
++ return rel;
++ ++rel;
++ }
++ return NULL;
++}
++
++/* Try to relax a GOT load (an ldq of R_TYPE: LITERAL, GOTDTPREL or
++ GOTTPREL) at IREL into a direct lda/GPREL16/DTPREL16/TPREL16 form,
++ when SYMVAL (the resolved symbol value) is within a 16-bit
++ displacement. On success the insn and reloc are rewritten and the
++ GOT entry's use count dropped. Returns false only on internal
++ error; "can't relax" cases return true and leave things alone. */
++
++static bool
++elf64_sw64_relax_got_load (struct sw64_relax_info *info, bfd_vma symval,
++ Elf_Internal_Rela *irel, unsigned long r_type)
++{
++ unsigned int insn;
++ bfd_signed_vma disp;
++
++ /* Get the instruction. */
++ insn = bfd_get_32 (info->abfd, info->contents + irel->r_offset);
++
++ /* Opcode lives in the top 6 bits; anything but ldq is unexpected. */
++ if (insn >> 26 != OP_LDQ)
++ {
++ reloc_howto_type *howto = elf64_sw64_howto_table + r_type;
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: %pA+%#" PRIx64 ": warning: "
++ "%s relocation against unexpected insn"),
++ info->abfd, info->sec, (uint64_t) irel->r_offset, howto->name);
++ return true;
++ }
++
++ /* Can't relax dynamic symbols. */
++ if (info->h != NULL
++ && sw64_elf_dynamic_symbol_p (&info->h->root, info->link_info))
++ return true;
++
++ /* Can't use local-exec relocations in shared libraries. */
++ if (r_type == R_SW64_GOTTPREL
++ && bfd_link_dll (info->link_info))
++ return true;
++
++ if (r_type == R_SW64_LITERAL)
++ {
++ /* Look for nice constant addresses. This includes the not-uncommon
++ special case of 0 for undefweak symbols. */
++ if ((info->h && info->h->root.root.type == bfd_link_hash_undefweak)
++ || (!bfd_link_pic (info->link_info)
++ && (symval >= (bfd_vma)-0x8000 || symval < 0x8000)))
++ {
++ /* lda rX,symval(zero): the value fits a sign-extended 16 bits. */
++ disp = 0;
++ insn = (OP_LDA << 26) | (insn & (31 << 21)) | (31 << 16);
++ insn |= (symval & 0xffff);
++ r_type = R_SW64_NONE;
++ }
++ else
++ {
++ /* We may only create GPREL relocs during the second pass. */
++ if (info->link_info->relax_pass == 0)
++ return true;
++
++ /* lda rX,disp(gp): keep both register fields of the ldq. */
++ disp = symval - info->gp;
++ insn = (OP_LDA << 26) | (insn & 0x03ff0000);
++ r_type = R_SW64_GPREL16;
++ }
++ }
++ else
++ {
++ /* TLS: rewrite as an lda of the dtp/tp-relative offset. */
++ bfd_vma dtp_base, tp_base;
++
++ BFD_ASSERT (elf_hash_table (info->link_info)->tls_sec != NULL);
++ dtp_base = sw64_get_dtprel_base (info->link_info);
++ tp_base = sw64_get_tprel_base (info->link_info);
++ disp = symval - (r_type == R_SW64_GOTDTPREL ? dtp_base : tp_base);
++
++ insn = (OP_LDA << 26) | (insn & (31 << 21)) | (31 << 16);
++
++ switch (r_type)
++ {
++ case R_SW64_GOTDTPREL:
++ r_type = R_SW64_DTPREL16;
++ break;
++ case R_SW64_GOTTPREL:
++ r_type = R_SW64_TPREL16;
++ break;
++ default:
++ BFD_ASSERT (0);
++ return false;
++ }
++ }
++
++ /* Bail if the displacement doesn't fit lda's 16-bit field. */
++ if (disp < -0x8000 || disp >= 0x8000)
++ return true;
++
++ bfd_put_32 (info->abfd, (bfd_vma) insn, info->contents + irel->r_offset);
++ info->changed_contents = true;
++
++ /* Reduce the use count on this got entry by one, possibly
++ eliminating it. NOTE(review): r_type has already been rewritten
++ to the relaxed type here; this is only correct if
++ sw64_got_entry_size gives the same size for old and new types —
++ confirm against sw64_got_entry_size. */
++ if (--info->gotent->use_count == 0)
++ {
++ int sz = sw64_got_entry_size (r_type);
++ sw64_elf_tdata (info->gotobj)->total_got_size -= sz;
++ if (!info->h)
++ sw64_elf_tdata (info->gotobj)->local_got_size -= sz;
++ }
++
++ /* Smash the existing GOT relocation for its 16-bit immediate pair. */
++ irel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info), r_type);
++ info->changed_relocs = true;
++
++ /* ??? Search forward through this basic block looking for insns
++ that use the target register. Stop after an insn modifying the
++ register is seen, or after a branch or call.
++
++ Any such memory load insn may be substituted by a load directly
++ off the GP. This allows the memory load insn to be issued before
++ the calculated GP register would otherwise be ready.
++
++ Any such jsr insn can be replaced by a bsr if it is in range.
++
++ This would mean that we'd have to _add_ relocations, the pain of
++ which gives one pause. */
++
++ return true;
++}
++
++/* Decide whether a call to SYMVAL can skip the callee's prologue GP
++ load. Returns 0 if not possible, SYMVAL itself when the callee
++ never needs its procedure value (NOPV), or SYMVAL + 8 to jump past
++ a detected two-insn ldgp sequence. */
++
++static bfd_vma
++elf64_sw64_relax_opt_call (struct sw64_relax_info *info, bfd_vma symval)
++{
++ /* If the function has the same gp, and we can identify that the
++ function does not use its function pointer, we can eliminate the
++ address load. */
++
++ /* If the symbol is marked NOPV, we are being told the function never
++ needs its procedure value. */
++ if ((info->other & STO_SW64_STD_GPLOAD) == STO_SW64_NOPV)
++ return symval;
++
++ /* If the symbol is marked STD_GP, we are being told the function does
++ a normal ldgp in the first two words. */
++ else if ((info->other & STO_SW64_STD_GPLOAD) == STO_SW64_STD_GPLOAD)
++ ;
++
++ /* Otherwise, we may be able to identify a GP load in the first two
++ words, which we can then skip. */
++ else
++ {
++ Elf_Internal_Rela *tsec_relocs, *tsec_relend, *tsec_free, *gpdisp;
++ bfd_vma ofs;
++
++ /* Load the relocations from the section that the target symbol is in. */
++ if (info->sec == info->tsec)
++ {
++ /* Same section: reuse the relocs we already have; nothing
++ to free afterwards. */
++ tsec_relocs = info->relocs;
++ tsec_relend = info->relend;
++ tsec_free = NULL;
++ }
++ else
++ {
++ tsec_relocs = (_bfd_elf_link_read_relocs
++ (info->abfd, info->tsec, NULL,
++ (Elf_Internal_Rela *) NULL,
++ info->link_info->keep_memory));
++ if (tsec_relocs == NULL)
++ return 0;
++ tsec_relend = tsec_relocs + info->tsec->reloc_count;
++ /* Only free if the read wasn't cached on the section. */
++ tsec_free = (elf_section_data (info->tsec)->relocs == tsec_relocs
++ ? NULL
++ : tsec_relocs);
++ }
++
++ /* Recover the symbol's offset within the section. */
++ ofs = (symval - info->tsec->output_section->vma
++ - info->tsec->output_offset);
++
++ /* Look for a GPDISP reloc. */
++ gpdisp = (elf64_sw64_find_reloc_at_ofs
++ (tsec_relocs, tsec_relend, ofs, R_SW64_GPDISP));
++
++ /* Require the canonical prologue shape: ldah at entry with the
++ lda immediately following (addend 4). */
++ if (!gpdisp || gpdisp->r_addend != 4)
++ {
++ free (tsec_free);
++ return 0;
++ }
++ free (tsec_free);
++ }
++
++ /* We've now determined that we can skip an initial gp load. Verify
++ that the call and the target use the same gp. */
++ if (info->link_info->output_bfd->xvec != info->tsec->owner->xvec
++ || info->gotobj != sw64_elf_tdata (info->tsec->owner)->gotobj)
++ return 0;
++
++ return symval + 8;
++}
++
++/* Relax a LITERAL reloc at IREL together with its trailing LITUSE
++ chain: memory uses may become GPREL16 or GPRELHIGH/GPRELLOW pairs,
++ byte-offset uses get the symbol's low bits folded in, and jsr uses
++ may become direct bsr/br, possibly also nopping the following ldgp.
++ If every use was optimized, the literal load itself is nopped and
++ the GOT entry released; otherwise falls back to
++ elf64_sw64_relax_got_load on later passes. */
++
++static bool
++elf64_sw64_relax_with_lituse (struct sw64_relax_info *info,
++ bfd_vma symval, Elf_Internal_Rela *irel)
++{
++ Elf_Internal_Rela *urel, *erel, *irelend = info->relend;
++ int flags;
++ bfd_signed_vma disp;
++ bool fits16;
++ bool fits32;
++ bool lit_reused = false;
++ bool all_optimized = true;
++ bool changed_contents;
++ bool changed_relocs;
++ bfd_byte *contents = info->contents;
++ bfd *abfd = info->abfd;
++ bfd_vma sec_output_vma;
++ unsigned int lit_insn;
++ int relax_pass;
++
++ lit_insn = bfd_get_32 (abfd, contents + irel->r_offset);
++ if (lit_insn >> 26 != OP_LDQ)
++ {
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: %pA+%#" PRIx64 ": warning: "
++ "%s relocation against unexpected insn"),
++ abfd, info->sec, (uint64_t) irel->r_offset, "LITERAL");
++ return true;
++ }
++
++ /* Can't relax dynamic symbols. */
++ if (info->h != NULL
++ && sw64_elf_dynamic_symbol_p (&info->h->root, info->link_info))
++ return true;
++
++ /* Work on local copies of the changed flags; they are copied back
++ to INFO at the bottom of the function. */
++ changed_contents = info->changed_contents;
++ changed_relocs = info->changed_relocs;
++ sec_output_vma = info->sec->output_section->vma + info->sec->output_offset;
++ relax_pass = info->link_info->relax_pass;
++
++ /* Summarize how this particular LITERAL is used. */
++ for (erel = irel+1, flags = 0; erel < irelend; ++erel)
++ {
++ if (ELF64_R_TYPE (erel->r_info) != R_SW64_LITUSE)
++ break;
++ if (erel->r_addend <= 6)
++ flags |= 1 << erel->r_addend;
++ }
++
++ /* A little preparation for the loop... */
++ disp = symval - info->gp;
++
++ /* [irel+1, erel) is now exactly the LITUSE chain for this LITERAL. */
++ for (urel = irel+1; urel < erel; ++urel)
++ {
++ bfd_vma urel_r_offset = urel->r_offset;
++ unsigned int insn;
++ int insn_disp;
++ bfd_signed_vma xdisp;
++ Elf_Internal_Rela nrel;
++
++ insn = bfd_get_32 (abfd, contents + urel_r_offset);
++
++ switch (urel->r_addend)
++ {
++ case LITUSE_SW64_ADDR:
++ default:
++ /* This type is really just a placeholder to note that all
++ uses cannot be optimized, but to still allow some. */
++ all_optimized = false;
++ break;
++
++ case LITUSE_SW64_BASE:
++ /* We may only create GPREL relocs during the second pass. */
++ if (relax_pass == 0)
++ {
++ all_optimized = false;
++ break;
++ }
++
++ /* We can always optimize 16-bit displacements. */
++
++ /* Extract the displacement from the instruction, sign-extending
++ it if necessary, then test whether it is within 16 or 32 bits
++ displacement from GP. */
++ insn_disp = ((insn & 0xffff) ^ 0x8000) - 0x8000;
++
++ xdisp = disp + insn_disp;
++ fits16 = (xdisp >= - (bfd_signed_vma) 0x8000 && xdisp < 0x8000);
++ fits32 = (xdisp >= - (bfd_signed_vma) 0x80000000
++ && xdisp < 0x7fff8000);
++
++ if (fits16)
++ {
++ /* Take the op code and dest from this insn, take the base
++ register from the literal insn. Leave the offset alone. */
++ insn = (insn & 0xffe0ffff) | (lit_insn & 0x001f0000);
++ bfd_put_32 (abfd, (bfd_vma) insn, contents + urel_r_offset);
++ changed_contents = true;
++
++ nrel = *urel;
++ nrel.r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
++ R_SW64_GPREL16);
++ nrel.r_addend = irel->r_addend;
++
++ /* As we adjust, move the reloc to the end so that we don't
++ break the LITERAL+LITUSE chain. */
++ if (urel < --erel)
++ *urel-- = *erel;
++ *erel = nrel;
++ changed_relocs = true;
++ }
++
++ /* If all mem+byte, we can optimize 32-bit mem displacements. */
++ else if (fits32 && !(flags & ~6))
++ {
++ /* FIXME: sanity check that lit insn Ra is mem insn Rb. */
++
++ irel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
++ R_SW64_GPRELHIGH);
++ lit_insn = (OP_LDAH << 26) | (lit_insn & 0x03ff0000);
++ bfd_put_32 (abfd, (bfd_vma) lit_insn, contents + irel->r_offset);
++ lit_reused = true;
++ changed_contents = true;
++
++ /* Since all relocs must be optimized, don't bother swapping
++ this relocation to the end. */
++ urel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
++ R_SW64_GPRELLOW);
++ urel->r_addend = irel->r_addend;
++ changed_relocs = true;
++ }
++ else
++ all_optimized = false;
++ break;
++
++ case LITUSE_SW64_BYTOFF:
++ /* We can always optimize byte instructions. */
++
++ /* FIXME: sanity check the insn for byte op. Check that the
++ literal dest reg is indeed Rb in the byte insn. */
++
++ /* Fold the symbol's low 3 bits into the insn's literal field. */
++ insn &= ~ (unsigned) 0x001ff000;
++ insn |= ((symval & 7) << 13) | 0x1000;
++ bfd_put_32 (abfd, (bfd_vma) insn, contents + urel_r_offset);
++ changed_contents = true;
++
++ nrel = *urel;
++ nrel.r_info = ELF64_R_INFO (0, R_SW64_NONE);
++ nrel.r_addend = 0;
++
++ /* As we adjust, move the reloc to the end so that we don't
++ break the LITERAL+LITUSE chain. */
++ if (urel < --erel)
++ *urel-- = *erel;
++ *erel = nrel;
++ changed_relocs = true;
++ break;
++
++ case LITUSE_SW64_JSR:
++ case LITUSE_SW64_TLSGD:
++ case LITUSE_SW64_TLSLDM:
++ case LITUSE_SW64_JSRDIRECT:
++ {
++ bfd_vma optdest, org;
++ bfd_signed_vma odisp;
++
++ /* For undefined weak symbols, we're mostly interested in getting
++ rid of the got entry whenever possible, so optimize this to a
++ use of the zero register. */
++ if (info->h && info->h->root.root.type == bfd_link_hash_undefweak)
++ {
++ insn |= 31 << 16;
++ bfd_put_32 (abfd, (bfd_vma) insn, contents + urel_r_offset);
++
++ changed_contents = true;
++ break;
++ }
++
++ /* If not zero, place to jump without needing pv. */
++ optdest = elf64_sw64_relax_opt_call (info, symval);
++ org = sec_output_vma + urel_r_offset + 4;
++ odisp = (optdest ? optdest : symval) - org;
++
++ /* Branch displacement must fit in 21 bits (of insn words). */
++ if (odisp >= -0x400000 && odisp < 0x400000)
++ {
++ Elf_Internal_Rela *xrel;
++
++ /* Preserve branch prediction call stack when possible. */
++ if ((insn & INSN_JSR_MASK) == INSN_JSR)
++ insn = (OP_BSR << 26) | (insn & 0x03e00000);
++ else
++ insn = (OP_BR << 26) | (insn & 0x03e00000);
++ bfd_put_32 (abfd, (bfd_vma) insn, contents + urel_r_offset);
++ changed_contents = true;
++
++ nrel = *urel;
++ nrel.r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
++ R_SW64_BRADDR);
++ nrel.r_addend = irel->r_addend;
++
++ if (optdest)
++ nrel.r_addend += optdest - symval;
++ else
++ all_optimized = false;
++
++ /* Kill any HINT reloc that might exist for this insn. */
++ xrel = (elf64_sw64_find_reloc_at_ofs
++ (info->relocs, info->relend, urel_r_offset,
++ R_SW64_HINT));
++ if (xrel)
++ xrel->r_info = ELF64_R_INFO (0, R_SW64_NONE);
++
++ /* As we adjust, move the reloc to the end so that we don't
++ break the LITERAL+LITUSE chain. */
++ if (urel < --erel)
++ *urel-- = *erel;
++ *erel = nrel;
++
++ /* NOTE(review): this writes the bfd-wide flag directly while
++ every other branch sets the local changed_relocs; the
++ copy-back at the bottom of this function stores the local
++ value and could lose this update — confirm against the
++ upstream source this port derives from. */
++ info->changed_relocs = true;
++ }
++ else
++ all_optimized = false;
++
++ /* Even if the target is not in range for a direct branch,
++ if we share a GP, we can eliminate the gp reload. */
++ if (optdest)
++ {
++ Elf_Internal_Rela *gpdisp
++ = (elf64_sw64_find_reloc_at_ofs
++ (info->relocs, irelend, urel_r_offset + 4,
++ R_SW64_GPDISP));
++ if (gpdisp)
++ {
++ bfd_byte *p_ldah = contents + gpdisp->r_offset;
++ bfd_byte *p_lda = p_ldah + gpdisp->r_addend;
++ unsigned int ldah = bfd_get_32 (abfd, p_ldah);
++ unsigned int lda = bfd_get_32 (abfd, p_lda);
++
++ /* Verify that the instruction is "ldah $29,0($26)".
++ Consider a function that ends in a noreturn call,
++ and that the next function begins with an ldgp,
++ and that by accident there is no padding between.
++ In that case the insn would use $27 as the base. */
++ if (ldah == 0x27ba0000 && lda == 0x23bd0000)
++ {
++ bfd_put_32 (abfd, (bfd_vma) INSN_UNOP, p_ldah);
++ bfd_put_32 (abfd, (bfd_vma) INSN_UNOP, p_lda);
++
++ gpdisp->r_info = ELF64_R_INFO (0, R_SW64_NONE);
++ changed_contents = true;
++ changed_relocs = true;
++ }
++ }
++ }
++ }
++ break;
++ }
++ }
++
++ /* If we reused the literal instruction, we must have optimized all. */
++ BFD_ASSERT(!lit_reused || all_optimized);
++
++ /* If all cases were optimized, we can reduce the use count on this
++ got entry by one, possibly eliminating it. */
++ if (all_optimized)
++ {
++ if (--info->gotent->use_count == 0)
++ {
++ int sz = sw64_got_entry_size (R_SW64_LITERAL);
++ sw64_elf_tdata (info->gotobj)->total_got_size -= sz;
++ if (!info->h)
++ sw64_elf_tdata (info->gotobj)->local_got_size -= sz;
++ }
++
++ /* If the literal instruction is no longer needed (it may have been
++ reused), we can eliminate it. */
++ /* ??? For now, I don't want to deal with compacting the section,
++ so just nop it out. */
++ if (!lit_reused)
++ {
++ irel->r_info = ELF64_R_INFO (0, R_SW64_NONE);
++ changed_relocs = true;
++
++ bfd_put_32 (abfd, (bfd_vma) INSN_UNOP, contents + irel->r_offset);
++ changed_contents = true;
++ }
++ }
++
++ /* Publish the locally-tracked flags. */
++ info->changed_contents = changed_contents;
++ info->changed_relocs = changed_relocs;
++
++ /* If some use survived, let the plain GOT-load relaxation have a
++ try on later passes. */
++ if (all_optimized || relax_pass == 0)
++ return true;
++ return elf64_sw64_relax_got_load (info, symval, irel, R_SW64_LITERAL);
++}
++
++/* Relax a __tls_get_addr call sequence headed by the TLSGD (IS_GD)
++ or TLSLDM reloc at IREL. The five-insn GD/LD sequence is rewritten
++ to an IE (gottprel) or LE (tprel) form as the link mode and SYMVAL
++ allow; use counts on the affected GOT entries are adjusted, and a
++ GOTTPREL entry is created or reused when the IE form is chosen.
++ Returns false only on allocation failure. */
++
++static bool
++elf64_sw64_relax_tls_get_addr (struct sw64_relax_info *info, bfd_vma symval,
++ Elf_Internal_Rela *irel, bool is_gd)
++{
++ bfd_byte *pos[5];
++ unsigned int insn, tlsgd_reg;
++ Elf_Internal_Rela *gpdisp, *hint;
++ bool dynamic, use_gottprel;
++ unsigned long new_symndx;
++
++ dynamic = (info->h != NULL
++ && sw64_elf_dynamic_symbol_p (&info->h->root, info->link_info));
++
++ /* If a TLS symbol is accessed using IE at least once, there is no point
++ to use dynamic model for it. */
++ if (is_gd && info->h && (info->h->flags & SW64_ELF_LINK_HASH_TLS_IE))
++ ;
++
++ /* If the symbol is local, and we've already committed to DF_STATIC_TLS,
++ then we might as well relax to IE. */
++ else if (bfd_link_pic (info->link_info) && !dynamic
++ && (info->link_info->flags & DF_STATIC_TLS))
++ ;
++
++ /* Otherwise we must be building an executable to do anything. */
++ else if (bfd_link_pic (info->link_info))
++ return true;
++
++ /* The TLSGD/TLSLDM relocation must be followed by a LITERAL and
++ the matching LITUSE_TLS relocations. */
++ if (irel + 2 >= info->relend)
++ return true;
++ if (ELF64_R_TYPE (irel[1].r_info) != R_SW64_LITERAL
++ || ELF64_R_TYPE (irel[2].r_info) != R_SW64_LITUSE
++ || irel[2].r_addend != (is_gd ? LITUSE_SW64_TLSGD : LITUSE_SW64_TLSLDM))
++ return true;
++
++ /* There must be a GPDISP relocation positioned immediately after the
++ LITUSE relocation. */
++ gpdisp = elf64_sw64_find_reloc_at_ofs (info->relocs, info->relend,
++ irel[2].r_offset + 4, R_SW64_GPDISP);
++ if (!gpdisp)
++ return true;
++
++ /* pos[0]=lda (tlsgd/tlsldm), pos[1]=ldq of __tls_get_addr,
++ pos[2]=jsr, pos[3]=ldah of the gpdisp, pos[4]=its lda. */
++ pos[0] = info->contents + irel[0].r_offset;
++ pos[1] = info->contents + irel[1].r_offset;
++ pos[2] = info->contents + irel[2].r_offset;
++ pos[3] = info->contents + gpdisp->r_offset;
++ pos[4] = pos[3] + gpdisp->r_addend;
++
++ /* Beware of the compiler hoisting part of the sequence out a loop
++ and adjusting the destination register for the TLSGD insn. If this
++ happens, there will be a move into $16 before the JSR insn, so only
++ transformations of the first insn pair should use this register. */
++ tlsgd_reg = bfd_get_32 (info->abfd, pos[0]);
++ tlsgd_reg = (tlsgd_reg >> 21) & 31;
++
++ /* Generally, the positions are not allowed to be out of order, lest the
++ modified insn sequence have different register lifetimes. We can make
++ an exception when pos 1 is adjacent to pos 0. */
++ if (pos[1] + 4 == pos[0])
++ {
++ bfd_byte *tmp = pos[0];
++ pos[0] = pos[1];
++ pos[1] = tmp;
++ }
++ if (pos[1] >= pos[2] || pos[2] >= pos[3])
++ return true;
++
++ /* Reduce the use count on the LITERAL relocation. Do this before we
++ smash the symndx when we adjust the relocations below. */
++ {
++ struct sw64_elf_got_entry *lit_gotent;
++ struct sw64_elf_link_hash_entry *lit_h;
++ unsigned long indx;
++
++ /* The LITERAL references __tls_get_addr, necessarily a global. */
++ BFD_ASSERT (ELF64_R_SYM (irel[1].r_info) >= info->symtab_hdr->sh_info);
++ indx = ELF64_R_SYM (irel[1].r_info) - info->symtab_hdr->sh_info;
++ lit_h = sw64_elf_sym_hashes (info->abfd)[indx];
++
++ while (lit_h->root.root.type == bfd_link_hash_indirect
++ || lit_h->root.root.type == bfd_link_hash_warning)
++ lit_h = (struct sw64_elf_link_hash_entry *) lit_h->root.root.u.i.link;
++
++ for (lit_gotent = lit_h->got_entries; lit_gotent ;
++ lit_gotent = lit_gotent->next)
++ if (lit_gotent->gotobj == info->gotobj
++ && lit_gotent->reloc_type == R_SW64_LITERAL
++ && lit_gotent->addend == irel[1].r_addend)
++ break;
++ BFD_ASSERT (lit_gotent);
++
++ if (--lit_gotent->use_count == 0)
++ {
++ int sz = sw64_got_entry_size (R_SW64_LITERAL);
++ sw64_elf_tdata (info->gotobj)->total_got_size -= sz;
++ }
++ }
++
++ /* Change
++
++ lda $16,x($gp) !tlsgd!1
++ ldq $27,__tls_get_addr($gp) !literal!1
++ jsr $26,($27),__tls_get_addr !lituse_tlsgd!1
++ ldah $29,0($26) !gpdisp!2
++ lda $29,0($29) !gpdisp!2
++ to
++ ldq $16,x($gp) !gottprel
++ unop
++ call_pal rduniq
++ addq $16,$0,$0
++ unop
++ or the first pair to
++ lda $16,x($gp) !tprel
++ unop
++ or
++ ldah $16,x($gp) !tprelhi
++ lda $16,x($16) !tprello
++
++ as appropriate. */
++
++ use_gottprel = false;
++ /* LD collapses the symbol to STN_UNDEF; GD keeps it. */
++ new_symndx = is_gd ? ELF64_R_SYM (irel->r_info) : STN_UNDEF;
++
++ /* Some compilers warn about a Boolean-looking expression being
++ used in a switch. The explicit cast silences them. */
++ switch ((int) (!dynamic && !bfd_link_pic (info->link_info)))
++ {
++ case 1:
++ {
++ /* Local-exec candidate: try the one- or two-insn tprel forms. */
++ bfd_vma tp_base;
++ bfd_signed_vma disp;
++
++ BFD_ASSERT (elf_hash_table (info->link_info)->tls_sec != NULL);
++ tp_base = sw64_get_tprel_base (info->link_info);
++ disp = symval - tp_base;
++
++ if (disp >= -0x8000 && disp < 0x8000)
++ {
++ /* 16-bit offset: lda from the zero register. */
++ insn = (OP_LDA << 26) | (tlsgd_reg << 21) | (31 << 16);
++ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[0]);
++ bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, pos[1]);
++
++ irel[0].r_offset = pos[0] - info->contents;
++ irel[0].r_info = ELF64_R_INFO (new_symndx, R_SW64_TPREL16);
++ irel[1].r_info = ELF64_R_INFO (0, R_SW64_NONE);
++ break;
++ }
++ else if (disp >= -(bfd_signed_vma) 0x80000000
++ && disp < (bfd_signed_vma) 0x7fff8000
++ && pos[0] + 4 == pos[1])
++ {
++ /* 32-bit offset: ldah/lda pair, needs adjacent insns. */
++ insn = (OP_LDAH << 26) | (tlsgd_reg << 21) | (31 << 16);
++ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[0]);
++ insn = (OP_LDA << 26) | (tlsgd_reg << 21) | (tlsgd_reg << 16);
++ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[1]);
++
++ irel[0].r_offset = pos[0] - info->contents;
++ irel[0].r_info = ELF64_R_INFO (new_symndx, R_SW64_TPRELHI);
++ irel[1].r_offset = pos[1] - info->contents;
++ irel[1].r_info = ELF64_R_INFO (new_symndx, R_SW64_TPRELLO);
++ break;
++ }
++ }
++ /* FALLTHRU */
++
++ default:
++ /* Initial-exec: ldq of the tp offset from the GOT. */
++ use_gottprel = true;
++
++ insn = (OP_LDQ << 26) | (tlsgd_reg << 21) | (29 << 16);
++ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[0]);
++ bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, pos[1]);
++
++ irel[0].r_offset = pos[0] - info->contents;
++ irel[0].r_info = ELF64_R_INFO (new_symndx, R_SW64_GOTTPREL);
++ irel[1].r_info = ELF64_R_INFO (0, R_SW64_NONE);
++ break;
++ }
++
++ /* Replace the call sequence tail: fetch the thread pointer and add. */
++ bfd_put_32 (info->abfd, (bfd_vma) INSN_RDUNIQ, pos[2]);
++
++ insn = INSN_ADDQ | (16 << 21) | (0 << 16) | (0 << 0);
++ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[3]);
++
++ bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, pos[4]);
++
++ irel[2].r_info = ELF64_R_INFO (0, R_SW64_NONE);
++ gpdisp->r_info = ELF64_R_INFO (0, R_SW64_NONE);
++
++ /* The jsr is gone; kill its branch-prediction HINT if any. */
++ hint = elf64_sw64_find_reloc_at_ofs (info->relocs, info->relend,
++ irel[2].r_offset, R_SW64_HINT);
++ if (hint)
++ hint->r_info = ELF64_R_INFO (0, R_SW64_NONE);
++
++ info->changed_contents = true;
++ info->changed_relocs = true;
++
++ /* Reduce the use count on the TLSGD/TLSLDM relocation. */
++ if (--info->gotent->use_count == 0)
++ {
++ int sz = sw64_got_entry_size (info->gotent->reloc_type);
++ sw64_elf_tdata (info->gotobj)->total_got_size -= sz;
++ if (!info->h)
++ sw64_elf_tdata (info->gotobj)->local_got_size -= sz;
++ }
++
++ /* If we've switched to a GOTTPREL relocation, increment the reference
++ count on that got entry. */
++ if (use_gottprel)
++ {
++ struct sw64_elf_got_entry *tprel_gotent;
++
++ for (tprel_gotent = *info->first_gotent; tprel_gotent ;
++ tprel_gotent = tprel_gotent->next)
++ if (tprel_gotent->gotobj == info->gotobj
++ && tprel_gotent->reloc_type == R_SW64_GOTTPREL
++ && tprel_gotent->addend == irel->r_addend)
++ break;
++ if (tprel_gotent)
++ tprel_gotent->use_count++;
++ else
++ {
++ /* Reuse the just-freed GD/LD entry if possible, else
++ allocate a fresh one on the bfd's obstack. */
++ if (info->gotent->use_count == 0)
++ tprel_gotent = info->gotent;
++ else
++ {
++ tprel_gotent = (struct sw64_elf_got_entry *)
++ bfd_alloc (info->abfd, sizeof (struct sw64_elf_got_entry));
++ if (!tprel_gotent)
++ return false;
++
++ tprel_gotent->next = *info->first_gotent;
++ *info->first_gotent = tprel_gotent;
++
++ tprel_gotent->gotobj = info->gotobj;
++ tprel_gotent->addend = irel->r_addend;
++ tprel_gotent->got_offset = -1;
++ tprel_gotent->reloc_done = 0;
++ tprel_gotent->reloc_xlated = 0;
++ }
++
++ tprel_gotent->use_count = 1;
++ tprel_gotent->reloc_type = R_SW64_GOTTPREL;
++ }
++ }
++
++ return true;
++}
++
++static bool
++elf64_sw64_relax_section (bfd *abfd, asection *sec,
++ struct bfd_link_info *link_info, bool *again)
++{
++ Elf_Internal_Shdr *symtab_hdr;
++ Elf_Internal_Rela *internal_relocs;
++ Elf_Internal_Rela *irel, *irelend;
++ Elf_Internal_Sym *isymbuf = NULL;
++ struct sw64_elf_got_entry **local_got_entries;
++ struct sw64_relax_info info;
++ struct sw64_elf_link_hash_table * htab;
++ int relax_pass;
++
++ htab = sw64_elf_hash_table (link_info);
++ if (htab == NULL)
++ return false;
++
++ /* There's nothing to change, yet. */
++ *again = false;
++
++ if (bfd_link_relocatable (link_info)
++ || ((sec->flags & (SEC_CODE | SEC_RELOC | SEC_ALLOC | SEC_HAS_CONTENTS))
++ != (SEC_CODE | SEC_RELOC | SEC_ALLOC | SEC_HAS_CONTENTS))
++ || sec->reloc_count == 0)
++ return true;
++
++ BFD_ASSERT (is_sw64_elf (abfd));
++ relax_pass = link_info->relax_pass;
++
++ /* Make sure our GOT and PLT tables are up-to-date. */
++ if (htab->relax_trip != link_info->relax_trip)
++ {
++ htab->relax_trip = link_info->relax_trip;
++
++ /* This should never fail after the initial round, since the only error
++ is GOT overflow, and relaxation only shrinks the table. However, we
++ may only merge got sections during the first pass. If we merge
++ sections after we've created GPREL relocs, the GP for the merged
++ section backs up which may put the relocs out of range. */
++ if (!elf64_sw64_size_got_sections (link_info, relax_pass == 0))
++ abort ();
++ if (elf_hash_table (link_info)->dynamic_sections_created)
++ {
++ elf64_sw64_size_plt_section (link_info);
++ elf64_sw64_size_rela_got_section (link_info);
++ }
++ }
++
++ symtab_hdr = &elf_symtab_hdr (abfd);
++ local_got_entries = sw64_elf_tdata(abfd)->local_got_entries;
++
++ /* Load the relocations for this section. */
++ internal_relocs = (_bfd_elf_link_read_relocs
++ (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
++ link_info->keep_memory));
++ if (internal_relocs == NULL)
++ return false;
++
++ memset(&info, 0, sizeof (info));
++ info.abfd = abfd;
++ info.sec = sec;
++ info.link_info = link_info;
++ info.symtab_hdr = symtab_hdr;
++ info.relocs = internal_relocs;
++ info.relend = irelend = internal_relocs + sec->reloc_count;
++
++ /* Find the GP for this object. Do not store the result back via
++ _bfd_set_gp_value, since this could change again before final. */
++ info.gotobj = sw64_elf_tdata (abfd)->gotobj;
++ if (info.gotobj)
++ {
++ asection *sgot = sw64_elf_tdata (info.gotobj)->got;
++ info.gp = (sgot->output_section->vma
++ + sgot->output_offset
++ + 0x8000);
++ }
++
++ /* Get the section contents. */
++ if (elf_section_data (sec)->this_hdr.contents != NULL)
++ info.contents = elf_section_data (sec)->this_hdr.contents;
++ else
++ {
++ if (!bfd_malloc_and_get_section (abfd, sec, &info.contents))
++ goto error_return;
++ }
++
++ for (irel = internal_relocs; irel < irelend; irel++)
++ {
++ bfd_vma symval;
++ struct sw64_elf_got_entry *gotent;
++ unsigned long r_type = ELF64_R_TYPE (irel->r_info);
++ unsigned long r_symndx = ELF64_R_SYM (irel->r_info);
++
++ /* Early exit for unhandled or unrelaxable relocations. */
++ if (r_type != R_SW64_LITERAL)
++ {
++ /* We complete everything except LITERAL in the first pass. */
++ if (relax_pass != 0)
++ continue;
++ if (r_type == R_SW64_TLSLDM)
++ {
++ /* The symbol for a TLSLDM reloc is ignored. Collapse the
++ reloc to the STN_UNDEF (0) symbol so that they all match. */
++ r_symndx = STN_UNDEF;
++ }
++ else if (r_type != R_SW64_GOTDTPREL
++ && r_type != R_SW64_GOTTPREL
++ && r_type != R_SW64_TLSGD)
++ continue;
++ }
++
++ /* Get the value of the symbol referred to by the reloc. */
++ if (r_symndx < symtab_hdr->sh_info)
++ {
++ /* A local symbol. */
++ Elf_Internal_Sym *isym;
++
++ /* Read this BFD's local symbols. */
++ if (isymbuf == NULL)
++ {
++ isymbuf = (Elf_Internal_Sym *) symtab_hdr->contents;
++ if (isymbuf == NULL)
++ isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr,
++ symtab_hdr->sh_info, 0,
++ NULL, NULL, NULL);
++ if (isymbuf == NULL)
++ goto error_return;
++ }
++
++ isym = isymbuf + r_symndx;
++
++ /* Given the symbol for a TLSLDM reloc is ignored, this also
++ means forcing the symbol value to the tp base. */
++ if (r_type == R_SW64_TLSLDM)
++ {
++ info.tsec = bfd_abs_section_ptr;
++ symval = sw64_get_tprel_base (info.link_info);
++ }
++ else
++ {
++ symval = isym->st_value;
++ if (isym->st_shndx == SHN_UNDEF)
++ continue;
++ else if (isym->st_shndx == SHN_ABS)
++ info.tsec = bfd_abs_section_ptr;
++ else if (isym->st_shndx == SHN_COMMON)
++ info.tsec = bfd_com_section_ptr;
++ else
++ info.tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
++ }
++
++ info.h = NULL;
++ info.other = isym->st_other;
++ if (local_got_entries)
++ info.first_gotent = &local_got_entries[r_symndx];
++ else
++ {
++ info.first_gotent = &info.gotent;
++ info.gotent = NULL;
++ }
++ }
++ else
++ {
++ unsigned long indx;
++ struct sw64_elf_link_hash_entry *h;
++
++ indx = r_symndx - symtab_hdr->sh_info;
++ h = sw64_elf_sym_hashes (abfd)[indx];
++ BFD_ASSERT (h != NULL);
++
++ while (h->root.root.type == bfd_link_hash_indirect
++ || h->root.root.type == bfd_link_hash_warning)
++ h = (struct sw64_elf_link_hash_entry *)h->root.root.u.i.link;
++
++ /* If the symbol is undefined, we can't do anything with it. */
++ if (h->root.root.type == bfd_link_hash_undefined)
++ continue;
++
++ /* If the symbol isn't defined in the current module,
++ again we can't do anything. */
++ if (h->root.root.type == bfd_link_hash_undefweak)
++ {
++ info.tsec = bfd_abs_section_ptr;
++ symval = 0;
++ }
++ else if (!h->root.def_regular)
++ {
++ /* Except for TLSGD relocs, which can sometimes be
++ relaxed to GOTTPREL relocs. */
++ if (r_type != R_SW64_TLSGD)
++ continue;
++ info.tsec = bfd_abs_section_ptr;
++ symval = 0;
++ }
++ else
++ {
++ info.tsec = h->root.root.u.def.section;
++ symval = h->root.root.u.def.value;
++ }
++
++ info.h = h;
++ info.other = h->root.other;
++ info.first_gotent = &h->got_entries;
++ }
++
++ /* Search for the got entry to be used by this relocation. */
++ for (gotent = *info.first_gotent; gotent ; gotent = gotent->next)
++ if (gotent->gotobj == info.gotobj
++ && gotent->reloc_type == r_type
++ && gotent->addend == irel->r_addend)
++ break;
++ info.gotent = gotent;
++
++ symval += info.tsec->output_section->vma + info.tsec->output_offset;
++ symval += irel->r_addend;
++
++ switch (r_type)
++ {
++ case R_SW64_LITERAL:
++ BFD_ASSERT(info.gotent != NULL);
++
++ /* If there exist LITUSE relocations immediately following, this
++ opens up all sorts of interesting optimizations, because we
++ now know every location that this address load is used. */
++ if (irel+1 < irelend
++ && ELF64_R_TYPE (irel[1].r_info) == R_SW64_LITUSE)
++ {
++ if (!elf64_sw64_relax_with_lituse (&info, symval, irel))
++ goto error_return;
++ }
++ else
++ {
++ if (!elf64_sw64_relax_got_load (&info, symval, irel, r_type))
++ goto error_return;
++ }
++ break;
++
++ case R_SW64_GOTDTPREL:
++ case R_SW64_GOTTPREL:
++ BFD_ASSERT(info.gotent != NULL);
++ if (!elf64_sw64_relax_got_load (&info, symval, irel, r_type))
++ goto error_return;
++ break;
++
++ case R_SW64_TLSGD:
++ case R_SW64_TLSLDM:
++ BFD_ASSERT(info.gotent != NULL);
++ if (!elf64_sw64_relax_tls_get_addr (&info, symval, irel,
++ r_type == R_SW64_TLSGD))
++ goto error_return;
++ break;
++ }
++ }
++
++ if (isymbuf != NULL
++ && symtab_hdr->contents != (unsigned char *) isymbuf)
++ {
++ if (!link_info->keep_memory)
++ free (isymbuf);
++ else
++ {
++ /* Cache the symbols for elf_link_input_bfd. */
++ symtab_hdr->contents = (unsigned char *) isymbuf;
++ }
++ }
++
++ if (info.contents != NULL
++ && elf_section_data (sec)->this_hdr.contents != info.contents)
++ {
++ if (!info.changed_contents && !link_info->keep_memory)
++ free (info.contents);
++ else
++ {
++ /* Cache the section contents for elf_link_input_bfd. */
++ elf_section_data (sec)->this_hdr.contents = info.contents;
++ }
++ }
++
++ if (elf_section_data (sec)->relocs != internal_relocs)
++ {
++ if (!info.changed_relocs)
++ free (internal_relocs);
++ else
++ elf_section_data (sec)->relocs = internal_relocs;
++ }
++
++ *again = info.changed_contents || info.changed_relocs;
++
++ return true;
++
++ error_return:
++ if (symtab_hdr->contents != (unsigned char *) isymbuf)
++ free (isymbuf);
++ if (elf_section_data (sec)->this_hdr.contents != info.contents)
++ free (info.contents);
++ if (elf_section_data (sec)->relocs != internal_relocs)
++ free (internal_relocs);
++ return false;
++}
++
++/* Emit a dynamic relocation for (DYNINDX, RTYPE, ADDEND) at (SEC, OFFSET)
++ into the next available slot in SREL. */
++
++static void
++elf64_sw64_emit_dynrel (bfd *abfd, struct bfd_link_info *info,
++ asection *sec, asection *srel, bfd_vma offset,
++ long dynindx, long rtype, bfd_vma addend)
++{
++ Elf_Internal_Rela outrel;
++ bfd_byte *loc;
++
++ BFD_ASSERT (srel != NULL);
++
++ outrel.r_info = ELF64_R_INFO (dynindx, rtype);
++ outrel.r_addend = addend;
++
++ offset = _bfd_elf_section_offset (abfd, info, sec, offset);
++ if ((offset | 1) != (bfd_vma) -1)
++ outrel.r_offset = sec->output_section->vma + sec->output_offset + offset;
++ else
++ memset (&outrel, 0, sizeof (outrel));
++
++ loc = srel->contents;
++ loc += srel->reloc_count++ * sizeof (Elf64_External_Rela);
++ bfd_elf64_swap_reloca_out (abfd, &outrel, loc);
++ BFD_ASSERT (sizeof (Elf64_External_Rela) * srel->reloc_count <= srel->size);
++}
++
++/* Relocate an SW64 ELF section for a relocatable link.
++
++ We don't have to change anything unless the reloc is against a section
++ symbol, in which case we have to adjust according to where the section
++ symbol winds up in the output section. */
++
++static int
++elf64_sw64_relocate_section_r (bfd *output_bfd ATTRIBUTE_UNUSED,
++ struct bfd_link_info *info ATTRIBUTE_UNUSED,
++ bfd *input_bfd, asection *input_section,
++ bfd_byte *contents ATTRIBUTE_UNUSED,
++ Elf_Internal_Rela *relocs,
++ Elf_Internal_Sym *local_syms,
++ asection **local_sections)
++{
++ unsigned long symtab_hdr_sh_info;
++ Elf_Internal_Rela *rel;
++ Elf_Internal_Rela *relend;
++ struct elf_link_hash_entry **sym_hashes;
++ bool ret_val = true;
++
++ symtab_hdr_sh_info = elf_symtab_hdr (input_bfd).sh_info;
++ sym_hashes = elf_sym_hashes (input_bfd);
++
++ relend = relocs + input_section->reloc_count;
++ for (rel = relocs; rel < relend; rel++)
++ {
++ unsigned long r_symndx;
++ Elf_Internal_Sym *sym;
++ asection *sec;
++ unsigned long r_type;
++
++ r_type = ELF64_R_TYPE (rel->r_info);
++ if (r_type >= R_SW64_max)
++ {
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: unsupported relocation type %#x"),
++ input_bfd, (int) r_type);
++ bfd_set_error (bfd_error_bad_value);
++ ret_val = false;
++ continue;
++ }
++
++ /* The symbol associated with GPDISP and LITUSE is
++ immaterial. Only the addend is significant. */
++ if (r_type == R_SW64_GPDISP || r_type == R_SW64_LITUSE)
++ continue;
++
++ r_symndx = ELF64_R_SYM (rel->r_info);
++ if (r_symndx < symtab_hdr_sh_info)
++ {
++ sym = local_syms + r_symndx;
++ sec = local_sections[r_symndx];
++ }
++ else
++ {
++ struct elf_link_hash_entry *h;
++
++ h = sym_hashes[r_symndx - symtab_hdr_sh_info];
++
++ while (h->root.type == bfd_link_hash_indirect
++ || h->root.type == bfd_link_hash_warning)
++ h = (struct elf_link_hash_entry *) h->root.u.i.link;
++
++ if (h->root.type != bfd_link_hash_defined
++ && h->root.type != bfd_link_hash_defweak)
++ continue;
++
++ sym = NULL;
++ sec = h->root.u.def.section;
++ }
++
++ if (sec != NULL && discarded_section (sec))
++ RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
++ rel, 1, relend,
++ elf64_sw64_howto_table + r_type, 0,
++ contents);
++
++ if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
++ rel->r_addend += sec->output_offset;
++ }
++
++ return ret_val;
++}
++
++/* Relocate an SW64 ELF section. */
++
++static int
++elf64_sw64_relocate_section (bfd *output_bfd, struct bfd_link_info *info,
++ bfd *input_bfd, asection *input_section,
++ bfd_byte *contents, Elf_Internal_Rela *relocs,
++ Elf_Internal_Sym *local_syms,
++ asection **local_sections)
++{
++ Elf_Internal_Shdr *symtab_hdr;
++ Elf_Internal_Rela *rel;
++ Elf_Internal_Rela *relend;
++ asection *sgot, *srel, *srelgot;
++ bfd *dynobj, *gotobj;
++ bfd_vma gp, tp_base, dtp_base;
++ struct sw64_elf_got_entry **local_got_entries;
++ bool ret_val;
++
++ BFD_ASSERT (is_sw64_elf (input_bfd));
++
++ /* Handle relocatable links with a smaller loop. */
++ if (bfd_link_relocatable (info))
++ return elf64_sw64_relocate_section_r (output_bfd, info, input_bfd,
++ input_section, contents, relocs,
++ local_syms, local_sections);
++
++ /* This is a final link. */
++
++ ret_val = true;
++
++ symtab_hdr = &elf_symtab_hdr (input_bfd);
++
++ dynobj = elf_hash_table (info)->dynobj;
++ srelgot = elf_hash_table (info)->srelgot;
++
++ if (input_section->flags & SEC_ALLOC)
++ {
++ const char *section_name;
++ section_name = (bfd_elf_string_from_elf_section
++ (input_bfd, elf_elfheader(input_bfd)->e_shstrndx,
++ _bfd_elf_single_rel_hdr (input_section)->sh_name));
++ BFD_ASSERT(section_name != NULL);
++ srel = bfd_get_linker_section (dynobj, section_name);
++ }
++ else
++ srel = NULL;
++
++ /* Find the gp value for this input bfd. */
++ gotobj = sw64_elf_tdata (input_bfd)->gotobj;
++ if (gotobj)
++ {
++ sgot = sw64_elf_tdata (gotobj)->got;
++ gp = _bfd_get_gp_value (gotobj);
++ if (gp == 0)
++ {
++ gp = (sgot->output_section->vma
++ + sgot->output_offset
++ + 0x8000);
++ _bfd_set_gp_value (gotobj, gp);
++ }
++ }
++ else
++ {
++ sgot = NULL;
++ gp = 0;
++ }
++
++ local_got_entries = sw64_elf_tdata(input_bfd)->local_got_entries;
++
++ if (elf_hash_table (info)->tls_sec != NULL)
++ {
++ dtp_base = sw64_get_dtprel_base (info);
++ tp_base = sw64_get_tprel_base (info);
++ }
++ else
++ dtp_base = tp_base = 0;
++
++ relend = relocs + input_section->reloc_count;
++ for (rel = relocs; rel < relend; rel++)
++ {
++ struct sw64_elf_link_hash_entry *h = NULL;
++ struct sw64_elf_got_entry *gotent;
++ bfd_reloc_status_type r;
++ reloc_howto_type *howto;
++ unsigned long r_symndx;
++ Elf_Internal_Sym *sym = NULL;
++ asection *sec = NULL;
++ bfd_vma value;
++ bfd_vma addend;
++ bool dynamic_symbol_p;
++ bool unresolved_reloc = false;
++ bool undef_weak_ref = false;
++ unsigned long r_type;
++
++ r_type = ELF64_R_TYPE(rel->r_info);
++ if (r_type >= R_SW64_max)
++ {
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: unsupported relocation type %#x"),
++ input_bfd, (int) r_type);
++ bfd_set_error (bfd_error_bad_value);
++ ret_val = false;
++ continue;
++ }
++
++ howto = elf64_sw64_howto_table + r_type;
++ r_symndx = ELF64_R_SYM(rel->r_info);
++
++ /* The symbol for a TLSLDM reloc is ignored. Collapse the
++ reloc to the STN_UNDEF (0) symbol so that they all match. */
++ if (r_type == R_SW64_TLSLDM)
++ r_symndx = STN_UNDEF;
++
++ if (r_symndx < symtab_hdr->sh_info)
++ {
++ asection *msec;
++ sym = local_syms + r_symndx;
++ sec = local_sections[r_symndx];
++ msec = sec;
++ value = _bfd_elf_rela_local_sym (output_bfd, sym, &msec, rel);
++
++ /* If this is a tp-relative relocation against sym STN_UNDEF (0),
++ this is hackery from relax_section. Force the value to
++ be the tls module base. */
++ if (r_symndx == STN_UNDEF
++ && (r_type == R_SW64_TLSLDM
++ || r_type == R_SW64_GOTTPREL
++ || r_type == R_SW64_TPREL64
++ || r_type == R_SW64_TPRELHI
++ || r_type == R_SW64_TPRELLO
++ || r_type == R_SW64_TPREL16))
++ value = dtp_base;
++
++ if (local_got_entries)
++ gotent = local_got_entries[r_symndx];
++ else
++ gotent = NULL;
++
++ /* Need to adjust local GOT entries' addends for SEC_MERGE
++ unless it has been done already. */
++ if ((sec->flags & SEC_MERGE)
++ && ELF_ST_TYPE (sym->st_info) == STT_SECTION
++ && sec->sec_info_type == SEC_INFO_TYPE_MERGE
++ && gotent
++ && !gotent->reloc_xlated)
++ {
++ struct sw64_elf_got_entry *ent;
++
++ for (ent = gotent; ent; ent = ent->next)
++ {
++ ent->reloc_xlated = 1;
++ if (ent->use_count == 0)
++ continue;
++ msec = sec;
++ ent->addend =
++ _bfd_merged_section_offset (output_bfd, &msec,
++ elf_section_data (sec)->
++ sec_info,
++ sym->st_value + ent->addend);
++ ent->addend -= sym->st_value;
++ ent->addend += msec->output_section->vma
++ + msec->output_offset
++ - sec->output_section->vma
++ - sec->output_offset;
++ }
++ }
++
++ dynamic_symbol_p = false;
++ }
++ else
++ {
++ bool warned, ignored;
++ struct elf_link_hash_entry *hh;
++ struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (input_bfd);
++
++ RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
++ r_symndx, symtab_hdr, sym_hashes,
++ hh, sec, value,
++ unresolved_reloc, warned, ignored);
++
++ if (warned)
++ continue;
++
++ if (value == 0
++ && ! unresolved_reloc
++ && hh->root.type == bfd_link_hash_undefweak)
++ undef_weak_ref = true;
++
++ h = (struct sw64_elf_link_hash_entry *) hh;
++ dynamic_symbol_p = sw64_elf_dynamic_symbol_p (&h->root, info);
++ gotent = h->got_entries;
++ }
++
++ if (sec != NULL && discarded_section (sec))
++ RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
++ rel, 1, relend, howto, 0, contents);
++
++ addend = rel->r_addend;
++ value += addend;
++
++ /* Search for the proper got entry. */
++ for (; gotent ; gotent = gotent->next)
++ if (gotent->gotobj == gotobj
++ && gotent->reloc_type == r_type
++ && gotent->addend == addend)
++ break;
++
++ switch (r_type)
++ {
++ case R_SW64_GPDISP:
++ {
++ bfd_byte *p_ldah, *p_lda;
++
++ BFD_ASSERT(gp != 0);
++
++ value = (input_section->output_section->vma
++ + input_section->output_offset
++ + rel->r_offset);
++
++ p_ldah = contents + rel->r_offset;
++ p_lda = p_ldah + rel->r_addend;
++
++ r = elf64_sw64_do_reloc_gpdisp (input_bfd, gp - value,
++ p_ldah, p_lda);
++ }
++ break;
++
++ case R_SW64_LITERAL:
++ BFD_ASSERT(sgot != NULL);
++ BFD_ASSERT(gp != 0);
++ BFD_ASSERT(gotent != NULL);
++ BFD_ASSERT(gotent->use_count >= 1);
++
++ if (!gotent->reloc_done)
++ {
++ gotent->reloc_done = 1;
++
++ bfd_put_64 (output_bfd, value,
++ sgot->contents + gotent->got_offset);
++
++ /* If the symbol has been forced local, output a
++ RELATIVE reloc, otherwise it will be handled in
++ finish_dynamic_symbol. */
++ if (bfd_link_pic (info)
++ && !dynamic_symbol_p
++ && !undef_weak_ref)
++ elf64_sw64_emit_dynrel (output_bfd, info, sgot, srelgot,
++ gotent->got_offset, 0,
++ R_SW64_RELATIVE, value);
++ }
++
++ value = (sgot->output_section->vma
++ + sgot->output_offset
++ + gotent->got_offset);
++ value -= gp;
++ goto default_reloc;
++
++ case R_SW64_GPREL32:
++ case R_SW64_GPREL16:
++ case R_SW64_GPRELLOW:
++ if (dynamic_symbol_p)
++ {
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: gp-relative relocation against dynamic symbol %s"),
++ input_bfd, h->root.root.root.string);
++ ret_val = false;
++ }
++ BFD_ASSERT(gp != 0);
++ value -= gp;
++ goto default_reloc;
++
++ case R_SW64_GPRELHIGH:
++ if (dynamic_symbol_p)
++ {
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: gp-relative relocation against dynamic symbol %s"),
++ input_bfd, h->root.root.root.string);
++ ret_val = false;
++ }
++ BFD_ASSERT(gp != 0);
++ value -= gp;
++ value = ((bfd_signed_vma) value >> 16) + ((value >> 15) & 1);
++ goto default_reloc;
++
++ case R_SW64_HINT:
++ /* A call to a dynamic symbol is definitely out of range of
++ the 16-bit displacement. Don't bother writing anything. */
++ if (dynamic_symbol_p)
++ {
++ r = bfd_reloc_ok;
++ break;
++ }
++ /* The regular PC-relative stuff measures from the start of
++ the instruction rather than the end. */
++ value -= 4;
++ goto default_reloc;
++
++ case R_SW64_BRADDR:
++ if (dynamic_symbol_p)
++ {
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: pc-relative relocation against dynamic symbol %s"),
++ input_bfd, h->root.root.root.string);
++ ret_val = false;
++ }
++ /* The regular PC-relative stuff measures from the start of
++ the instruction rather than the end. */
++ value -= 4;
++ goto default_reloc;
++
++ case R_SW64_BRSGP:
++ {
++ int other;
++ const char *name;
++
++ /* The regular PC-relative stuff measures from the start of
++ the instruction rather than the end. */
++ value -= 4;
++
++ /* The source and destination gp must be the same. Note that
++ the source will always have an assigned gp, since we forced
++ one in check_relocs, but that the destination may not, as
++ it might not have had any relocations at all. Also take
++ care not to crash if H is an undefined symbol. */
++ if (h != NULL && sec != NULL
++ && sw64_elf_tdata (sec->owner)->gotobj
++ && gotobj != sw64_elf_tdata (sec->owner)->gotobj)
++ {
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: change in gp: BRSGP %s"),
++ input_bfd, h->root.root.root.string);
++ ret_val = false;
++ }
++
++ /* The symbol should be marked either NOPV or STD_GPLOAD. */
++ if (h != NULL)
++ other = h->root.other;
++ else
++ other = sym->st_other;
++ switch (other & STO_SW64_STD_GPLOAD)
++ {
++ case STO_SW64_NOPV:
++ break;
++ case STO_SW64_STD_GPLOAD:
++ value += 8;
++ break;
++ default:
++ if (h != NULL)
++ name = h->root.root.root.string;
++ else
++ {
++ name = (bfd_elf_string_from_elf_section
++ (input_bfd, symtab_hdr->sh_link, sym->st_name));
++ if (name == NULL)
++ name = _("<unknown>");
++ else if (name[0] == 0)
++ name = bfd_section_name (sec);
++ }
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: !samegp reloc against symbol without .prologue: %s"),
++ input_bfd, name);
++ ret_val = false;
++ break;
++ }
++
++ goto default_reloc;
++ }
++
++ case R_SW64_REFLONG:
++ case R_SW64_REFQUAD:
++ case R_SW64_DTPREL64:
++ case R_SW64_TPREL64:
++ {
++ long dynindx, dyntype = r_type;
++ bfd_vma dynaddend;
++
++ /* Careful here to remember RELATIVE relocations for global
++ variables for symbolic shared objects. */
++
++ if (dynamic_symbol_p)
++ {
++ BFD_ASSERT(h->root.dynindx != -1);
++ dynindx = h->root.dynindx;
++ dynaddend = addend;
++ addend = 0, value = 0;
++ }
++ else if (r_type == R_SW64_DTPREL64)
++ {
++ BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
++ value -= dtp_base;
++ goto default_reloc;
++ }
++ else if (r_type == R_SW64_TPREL64)
++ {
++ BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
++ if (!bfd_link_dll (info))
++ {
++ value -= tp_base;
++ goto default_reloc;
++ }
++ dynindx = 0;
++ dynaddend = value - dtp_base;
++ }
++ else if (bfd_link_pic (info)
++ && r_symndx != STN_UNDEF
++ && (input_section->flags & SEC_ALLOC)
++ && !undef_weak_ref
++ && !(unresolved_reloc
++ && (_bfd_elf_section_offset (output_bfd, info,
++ input_section,
++ rel->r_offset)
++ == (bfd_vma) -1)))
++ {
++ if (r_type == R_SW64_REFLONG)
++ {
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: unhandled dynamic relocation against %s"),
++ input_bfd,
++ h->root.root.root.string);
++ ret_val = false;
++ }
++ dynindx = 0;
++ dyntype = R_SW64_RELATIVE;
++ dynaddend = value;
++ }
++ else
++ goto default_reloc;
++
++ if (input_section->flags & SEC_ALLOC)
++ elf64_sw64_emit_dynrel (output_bfd, info, input_section,
++ srel, rel->r_offset, dynindx,
++ dyntype, dynaddend);
++ }
++ goto default_reloc;
++
++ case R_SW64_SREL16:
++ case R_SW64_SREL32:
++ case R_SW64_SREL64:
++ if (dynamic_symbol_p)
++ {
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: pc-relative relocation against dynamic symbol %s"),
++ input_bfd, h->root.root.root.string);
++ ret_val = false;
++ }
++ else if (bfd_link_pic (info)
++ && undef_weak_ref)
++ {
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: pc-relative relocation against undefined weak symbol %s"),
++ input_bfd, h->root.root.root.string);
++ ret_val = false;
++ }
++
++
++ /* ??? .eh_frame references to discarded sections will be smashed
++ to relocations against SHN_UNDEF. The .eh_frame format allows
++ NULL to be encoded as 0 in any format, so this works here. */
++ if (r_symndx == STN_UNDEF
++ || (unresolved_reloc
++ && _bfd_elf_section_offset (output_bfd, info,
++ input_section,
++ rel->r_offset) == (bfd_vma) -1))
++ howto = (elf64_sw64_howto_table
++ + (r_type - R_SW64_SREL32 + R_SW64_REFLONG));
++ goto default_reloc;
++
++ case R_SW64_TLSLDM:
++ /* Ignore the symbol for the relocation. The result is always
++ the current module. */
++ dynamic_symbol_p = 0;
++ /* FALLTHRU */
++
++ case R_SW64_TLSGD:
++ if (!gotent->reloc_done)
++ {
++ gotent->reloc_done = 1;
++
++ /* Note that the module index for the main program is 1. */
++ bfd_put_64 (output_bfd,
++ !bfd_link_pic (info) && !dynamic_symbol_p,
++ sgot->contents + gotent->got_offset);
++
++ /* If the symbol has been forced local, output a
++ DTPMOD64 reloc, otherwise it will be handled in
++ finish_dynamic_symbol. */
++ if (bfd_link_pic (info) && !dynamic_symbol_p)
++ elf64_sw64_emit_dynrel (output_bfd, info, sgot, srelgot,
++ gotent->got_offset, 0,
++ R_SW64_DTPMOD64, 0);
++
++ if (dynamic_symbol_p || r_type == R_SW64_TLSLDM)
++ value = 0;
++ else
++ {
++ BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
++ value -= dtp_base;
++ }
++ bfd_put_64 (output_bfd, value,
++ sgot->contents + gotent->got_offset + 8);
++ }
++
++ value = (sgot->output_section->vma
++ + sgot->output_offset
++ + gotent->got_offset);
++ value -= gp;
++ goto default_reloc;
++
++ case R_SW64_DTPRELHI:
++ case R_SW64_DTPRELLO:
++ case R_SW64_DTPREL16:
++ if (dynamic_symbol_p)
++ {
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: dtp-relative relocation against dynamic symbol %s"),
++ input_bfd, h->root.root.root.string);
++ ret_val = false;
++ }
++ BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
++ value -= dtp_base;
++ if (r_type == R_SW64_DTPRELHI)
++ value = ((bfd_signed_vma) value >> 16) + ((value >> 15) & 1);
++ goto default_reloc;
++
++ case R_SW64_TPRELHI:
++ case R_SW64_TPRELLO:
++ case R_SW64_TPREL16:
++ if (bfd_link_dll (info))
++ {
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: TLS local exec code cannot be linked into shared objects"),
++ input_bfd);
++ ret_val = false;
++ }
++ else if (dynamic_symbol_p)
++ {
++ _bfd_error_handler
++ /* xgettext:c-format */
++ (_("%pB: tp-relative relocation against dynamic symbol %s"),
++ input_bfd, h->root.root.root.string);
++ ret_val = false;
++ }
++ BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
++ value -= tp_base;
++ if (r_type == R_SW64_TPRELHI)
++ value = ((bfd_signed_vma) value >> 16) + ((value >> 15) & 1);
++ goto default_reloc;
++
++ case R_SW64_GOTDTPREL:
++ case R_SW64_GOTTPREL:
++ BFD_ASSERT(sgot != NULL);
++ BFD_ASSERT(gp != 0);
++ BFD_ASSERT(gotent != NULL);
++ BFD_ASSERT(gotent->use_count >= 1);
++
++ if (!gotent->reloc_done)
++ {
++ gotent->reloc_done = 1;
++
++ if (dynamic_symbol_p)
++ value = 0;
++ else
++ {
++ BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
++ if (r_type == R_SW64_GOTDTPREL)
++ value -= dtp_base;
++ else if (bfd_link_executable (info))
++ value -= tp_base;
++ else
++ {
++ elf64_sw64_emit_dynrel (output_bfd, info, sgot, srelgot,
++ gotent->got_offset, 0,
++ R_SW64_TPREL64,
++ value - dtp_base);
++ value = 0;
++ }
++ }
++ bfd_put_64 (output_bfd, value,
++ sgot->contents + gotent->got_offset);
++ }
++
++ value = (sgot->output_section->vma
++ + sgot->output_offset
++ + gotent->got_offset);
++ value -= gp;
++ goto default_reloc;
++
++ default:
++ default_reloc:
++ r = _bfd_final_link_relocate (howto, input_bfd, input_section,
++ contents, rel->r_offset, value, 0);
++ break;
++ }
++
++ switch (r)
++ {
++ case bfd_reloc_ok:
++ break;
++
++ case bfd_reloc_overflow:
++ {
++ const char *name;
++
++ /* Don't warn if the overflow is due to pc relative reloc
++ against discarded section. Section optimization code should
++ handle it. */
++
++ if (r_symndx < symtab_hdr->sh_info
++ && sec != NULL && howto->pc_relative
++ && discarded_section (sec))
++ break;
++
++ if (h != NULL)
++ name = NULL;
++ else
++ {
++ name = (bfd_elf_string_from_elf_section
++ (input_bfd, symtab_hdr->sh_link, sym->st_name));
++ if (name == NULL)
++ return false;
++ if (*name == '\0')
++ name = bfd_section_name (sec);
++ }
++ (*info->callbacks->reloc_overflow)
++ (info, (h ? &h->root.root : NULL), name, howto->name,
++ (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
++ }
++ break;
++
++ default:
++ case bfd_reloc_outofrange:
++ abort ();
++ }
++ }
++
++ return ret_val;
++}
++
++/* Finish up dynamic symbol handling. We set the contents of various
++ dynamic sections here. */
++
++static bool
++elf64_sw64_finish_dynamic_symbol (bfd *output_bfd, struct bfd_link_info *info,
++ struct elf_link_hash_entry *h,
++ Elf_Internal_Sym *sym)
++{
++ struct sw64_elf_link_hash_entry *ah = (struct sw64_elf_link_hash_entry *)h;
++
++ if (h->needs_plt)
++ {
++ /* Fill in the .plt entry for this symbol. */
++ asection *splt, *sgot, *srel;
++ Elf_Internal_Rela outrel;
++ bfd_byte *loc;
++ bfd_vma got_addr, plt_addr;
++ bfd_vma plt_index;
++ struct sw64_elf_got_entry *gotent;
++
++ BFD_ASSERT (h->dynindx != -1);
++
++ splt = elf_hash_table (info)->splt;
++ BFD_ASSERT (splt != NULL);
++ srel = elf_hash_table (info)->srelplt;
++ BFD_ASSERT (srel != NULL);
++
++ for (gotent = ah->got_entries; gotent ; gotent = gotent->next)
++ if (gotent->reloc_type == R_SW64_LITERAL
++ && gotent->use_count > 0)
++ {
++ unsigned int insn;
++ int disp;
++
++ sgot = sw64_elf_tdata (gotent->gotobj)->got;
++ BFD_ASSERT (sgot != NULL);
++
++ BFD_ASSERT (gotent->got_offset != -1);
++ BFD_ASSERT (gotent->plt_offset != -1);
++
++ got_addr = (sgot->output_section->vma
++ + sgot->output_offset
++ + gotent->got_offset);
++ plt_addr = (splt->output_section->vma
++ + splt->output_offset
++ + gotent->plt_offset);
++
++ plt_index = (gotent->plt_offset-PLT_HEADER_SIZE) / PLT_ENTRY_SIZE;
++
++ /* Fill in the entry in the procedure linkage table. */
++ if (elf64_sw64_use_secureplt)
++ {
++ disp = (PLT_HEADER_SIZE - 4) - (gotent->plt_offset + 4);
++ insn = INSN_AD (INSN_BR, 31, disp);
++ bfd_put_32 (output_bfd, insn,
++ splt->contents + gotent->plt_offset);
++
++ plt_index = ((gotent->plt_offset - NEW_PLT_HEADER_SIZE)
++ / NEW_PLT_ENTRY_SIZE);
++ }
++ else
++ {
++ disp = -(gotent->plt_offset + 4);
++ insn = INSN_AD (INSN_BR, 28, disp);
++ bfd_put_32 (output_bfd, insn,
++ splt->contents + gotent->plt_offset);
++ bfd_put_32 (output_bfd, INSN_UNOP,
++ splt->contents + gotent->plt_offset + 4);
++ bfd_put_32 (output_bfd, INSN_UNOP,
++ splt->contents + gotent->plt_offset + 8);
++
++ plt_index = ((gotent->plt_offset - OLD_PLT_HEADER_SIZE)
++ / OLD_PLT_ENTRY_SIZE);
++ }
++
++ /* Fill in the entry in the .rela.plt section. */
++ outrel.r_offset = got_addr;
++ outrel.r_info = ELF64_R_INFO(h->dynindx, R_SW64_JMP_SLOT);
++ outrel.r_addend = 0;
++
++ loc = srel->contents + plt_index * sizeof (Elf64_External_Rela);
++ bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
++
++ /* Fill in the entry in the .got. */
++ bfd_put_64 (output_bfd, plt_addr,
++ sgot->contents + gotent->got_offset);
++ }
++ }
++ else if (sw64_elf_dynamic_symbol_p (h, info))
++ {
++ /* Fill in the dynamic relocations for this symbol's .got entries. */
++ asection *srel;
++ struct sw64_elf_got_entry *gotent;
++
++ srel = elf_hash_table (info)->srelgot;
++ BFD_ASSERT (srel != NULL);
++
++ for (gotent = ((struct sw64_elf_link_hash_entry *) h)->got_entries;
++ gotent != NULL;
++ gotent = gotent->next)
++ {
++ asection *sgot;
++ long r_type;
++
++ if (gotent->use_count == 0)
++ continue;
++
++ sgot = sw64_elf_tdata (gotent->gotobj)->got;
++
++ r_type = gotent->reloc_type;
++ switch (r_type)
++ {
++ case R_SW64_LITERAL:
++ r_type = R_SW64_GLOB_DAT;
++ break;
++ case R_SW64_TLSGD:
++ r_type = R_SW64_DTPMOD64;
++ break;
++ case R_SW64_GOTDTPREL:
++ r_type = R_SW64_DTPREL64;
++ break;
++ case R_SW64_GOTTPREL:
++ r_type = R_SW64_TPREL64;
++ break;
++ case R_SW64_TLSLDM:
++ default:
++ abort ();
++ }
++
++ elf64_sw64_emit_dynrel (output_bfd, info, sgot, srel,
++ gotent->got_offset, h->dynindx,
++ r_type, gotent->addend);
++
++ if (gotent->reloc_type == R_SW64_TLSGD)
++ elf64_sw64_emit_dynrel (output_bfd, info, sgot, srel,
++ gotent->got_offset + 8, h->dynindx,
++ R_SW64_DTPREL64, gotent->addend);
++ }
++ }
++
++ /* Mark some specially defined symbols as absolute. */
++ if (h == elf_hash_table (info)->hdynamic
++ || h == elf_hash_table (info)->hgot
++ || h == elf_hash_table (info)->hplt)
++ sym->st_shndx = SHN_ABS;
++
++ return true;
++}
++
++/* Finish up the dynamic sections. */
++
++static bool
++elf64_sw64_finish_dynamic_sections (bfd *output_bfd,
++ struct bfd_link_info *info)
++{
++ bfd *dynobj;
++ asection *sdyn;
++
++ dynobj = elf_hash_table (info)->dynobj;
++ sdyn = bfd_get_linker_section (dynobj, ".dynamic");
++
++ if (elf_hash_table (info)->dynamic_sections_created)
++ {
++ asection *splt, *sgotplt, *srelaplt;
++ Elf64_External_Dyn *dyncon, *dynconend;
++ bfd_vma plt_vma, gotplt_vma;
++
++ splt = elf_hash_table (info)->splt;
++ srelaplt = elf_hash_table (info)->srelplt;
++ BFD_ASSERT (splt != NULL && sdyn != NULL);
++
++ plt_vma = splt->output_section->vma + splt->output_offset;
++
++ gotplt_vma = 0;
++ if (elf64_sw64_use_secureplt)
++ {
++ sgotplt = elf_hash_table (info)->sgotplt;
++ BFD_ASSERT (sgotplt != NULL);
++ if (sgotplt->size > 0)
++ gotplt_vma = sgotplt->output_section->vma + sgotplt->output_offset;
++ }
++
++ dyncon = (Elf64_External_Dyn *) sdyn->contents;
++ dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size);
++ for (; dyncon < dynconend; dyncon++)
++ {
++ Elf_Internal_Dyn dyn;
++
++ bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);
++
++ switch (dyn.d_tag)
++ {
++ case DT_PLTGOT:
++ dyn.d_un.d_ptr
++ = elf64_sw64_use_secureplt ? gotplt_vma : plt_vma;
++ break;
++ case DT_PLTRELSZ:
++ dyn.d_un.d_val = srelaplt ? srelaplt->size : 0;
++ break;
++ case DT_JMPREL:
++ dyn.d_un.d_ptr = srelaplt ? (srelaplt->output_section->vma
++ + srelaplt->output_offset) : 0;
++ break;
++ }
++
++ bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
++ }
++
++ /* Initialize the plt header. */
++ if (splt->size > 0)
++ {
++ unsigned int insn;
++ int ofs;
++
++ if (elf64_sw64_use_secureplt)
++ {
++ ofs = gotplt_vma - (plt_vma + PLT_HEADER_SIZE);
++
++ insn = INSN_ABC (INSN_SUBQ, 27, 28, 25);
++ bfd_put_32 (output_bfd, insn, splt->contents);
++
++ insn = INSN_ABO (INSN_LDAH, 28, 28, (ofs + 0x8000) >> 16);
++ bfd_put_32 (output_bfd, insn, splt->contents + 4);
++
++ insn = INSN_ABC (INSN_S4SUBQ, 25, 25, 25);
++ bfd_put_32 (output_bfd, insn, splt->contents + 8);
++
++ insn = INSN_ABO (INSN_LDA, 28, 28, ofs);
++ bfd_put_32 (output_bfd, insn, splt->contents + 12);
++
++ insn = INSN_ABO (INSN_LDQ, 27, 28, 0);
++ bfd_put_32 (output_bfd, insn, splt->contents + 16);
++
++ insn = INSN_ABC (INSN_ADDQ, 25, 25, 25);
++ bfd_put_32 (output_bfd, insn, splt->contents + 20);
++
++ insn = INSN_ABO (INSN_LDQ, 28, 28, 8);
++ bfd_put_32 (output_bfd, insn, splt->contents + 24);
++
++ insn = INSN_AB (INSN_JMP, 31, 27);
++ bfd_put_32 (output_bfd, insn, splt->contents + 28);
++
++ insn = INSN_AD (INSN_BR, 28, -PLT_HEADER_SIZE);
++ bfd_put_32 (output_bfd, insn, splt->contents + 32);
++ }
++ else
++ {
++ insn = INSN_AD (INSN_BR, 27, 0); /* br $27, .+4 */
++ bfd_put_32 (output_bfd, insn, splt->contents);
++
++ insn = INSN_ABO (INSN_LDQ, 27, 27, 12);
++ bfd_put_32 (output_bfd, insn, splt->contents + 4);
++
++ insn = INSN_UNOP;
++ bfd_put_32 (output_bfd, insn, splt->contents + 8);
++
++ insn = INSN_AB (INSN_JMP, 27, 27);
++ bfd_put_32 (output_bfd, insn, splt->contents + 12);
++
++ /* The next two words will be filled in by ld.so. */
++ bfd_put_64 (output_bfd, 0, splt->contents + 16);
++ bfd_put_64 (output_bfd, 0, splt->contents + 24);
++ }
++
++ elf_section_data (splt->output_section)->this_hdr.sh_entsize = 0;
++ }
++ }
++
++ return true;
++}
++
++/* We need to use a special link routine to handle the .mdebug section.
++ We need to merge all instances of these sections together, not write
++ them all out sequentially. */
++
++static bool
++elf64_sw64_final_link (bfd *abfd, struct bfd_link_info *info)
++{
++ asection *o;
++ struct bfd_link_order *p;
++ asection *mdebug_sec;
++ struct ecoff_debug_info debug;
++ const struct ecoff_debug_swap *swap
++ = get_elf_backend_data (abfd)->elf_backend_ecoff_debug_swap;
++ HDRR *symhdr = &debug.symbolic_header;
++ void * mdebug_handle = NULL;
++ struct sw64_elf_link_hash_table * htab;
++
++ htab = sw64_elf_hash_table (info);
++ if (htab == NULL)
++ return false;
++
++ /* Go through the sections and collect the mdebug information. */
++ mdebug_sec = NULL;
++ for (o = abfd->sections; o != (asection *) NULL; o = o->next)
++ {
++ if (strcmp (o->name, ".mdebug") == 0)
++ {
++ struct extsym_info einfo;
++
++ /* We have found the .mdebug section in the output file.
++ Look through all the link_orders comprising it and merge
++ the information together. */
++ symhdr->magic = swap->sym_magic;
++ /* FIXME: What should the version stamp be? */
++ symhdr->vstamp = 0;
++ symhdr->ilineMax = 0;
++ symhdr->cbLine = 0;
++ symhdr->idnMax = 0;
++ symhdr->ipdMax = 0;
++ symhdr->isymMax = 0;
++ symhdr->ioptMax = 0;
++ symhdr->iauxMax = 0;
++ symhdr->issMax = 0;
++ symhdr->issExtMax = 0;
++ symhdr->ifdMax = 0;
++ symhdr->crfd = 0;
++ symhdr->iextMax = 0;
++
++ /* We accumulate the debugging information itself in the
++ debug_info structure. */
++ debug.line = NULL;
++ debug.external_dnr = NULL;
++ debug.external_pdr = NULL;
++ debug.external_sym = NULL;
++ debug.external_opt = NULL;
++ debug.external_aux = NULL;
++ debug.ss = NULL;
++ debug.ssext = debug.ssext_end = NULL;
++ debug.external_fdr = NULL;
++ debug.external_rfd = NULL;
++ debug.external_ext = debug.external_ext_end = NULL;
++
++ mdebug_handle = bfd_ecoff_debug_init (abfd, &debug, swap, info);
++ if (mdebug_handle == NULL)
++ return false;
++
++ if (1)
++ {
++ asection *s;
++ EXTR esym;
++ bfd_vma last = 0;
++ unsigned int i;
++ static const char * const name[] =
++ {
++ ".text", ".init", ".fini", ".data",
++ ".rodata", ".sdata", ".sbss", ".bss"
++ };
++ static const int sc[] = { scText, scInit, scFini, scData,
++ scRData, scSData, scSBss, scBss };
++
++ esym.jmptbl = 0;
++ esym.cobol_main = 0;
++ esym.weakext = 0;
++ esym.reserved = 0;
++ esym.ifd = ifdNil;
++ esym.asym.iss = issNil;
++ esym.asym.st = stLocal;
++ esym.asym.reserved = 0;
++ esym.asym.index = indexNil;
++ for (i = 0; i < 8; i++)
++ {
++ esym.asym.sc = sc[i];
++ s = bfd_get_section_by_name (abfd, name[i]);
++ if (s != NULL)
++ {
++ esym.asym.value = s->vma;
++ last = s->vma + s->size;
++ }
++ else
++ esym.asym.value = last;
++
++ if (! bfd_ecoff_debug_one_external (abfd, &debug, swap,
++ name[i], &esym))
++ return false;
++ }
++ }
++
++ for (p = o->map_head.link_order;
++ p != (struct bfd_link_order *) NULL;
++ p = p->next)
++ {
++ asection *input_section;
++ bfd *input_bfd;
++ const struct ecoff_debug_swap *input_swap;
++ struct ecoff_debug_info input_debug;
++ char *eraw_src;
++ char *eraw_end;
++
++ if (p->type != bfd_indirect_link_order)
++ {
++ if (p->type == bfd_data_link_order)
++ continue;
++ abort ();
++ }
++
++ input_section = p->u.indirect.section;
++ input_bfd = input_section->owner;
++
++ if (! is_sw64_elf (input_bfd))
++ /* I don't know what a non SW64 ELF bfd would be
++ doing with a .mdebug section, but I don't really
++ want to deal with it. */
++ continue;
++
++ input_swap = (get_elf_backend_data (input_bfd)
++ ->elf_backend_ecoff_debug_swap);
++
++ BFD_ASSERT (p->size == input_section->size);
++
++ /* The ECOFF linking code expects that we have already
++ read in the debugging information and set up an
++ ecoff_debug_info structure, so we do that now. */
++ if (!elf64_sw64_read_ecoff_info (input_bfd, input_section,
++ &input_debug))
++ return false;
++
++ if (! (bfd_ecoff_debug_accumulate
++ (mdebug_handle, abfd, &debug, swap, input_bfd,
++ &input_debug, input_swap, info)))
++ return false;
++
++ /* Loop through the external symbols. For each one with
++ interesting information, try to find the symbol in
++ the linker global hash table and save the information
++ for the output external symbols. */
++ eraw_src = (char *) input_debug.external_ext;
++ eraw_end = (eraw_src
++ + (input_debug.symbolic_header.iextMax
++ * input_swap->external_ext_size));
++ for (;
++ eraw_src < eraw_end;
++ eraw_src += input_swap->external_ext_size)
++ {
++ EXTR ext;
++ const char *name;
++ struct sw64_elf_link_hash_entry *h;
++
++ (*input_swap->swap_ext_in) (input_bfd, eraw_src, &ext);
++ if (ext.asym.sc == scNil
++ || ext.asym.sc == scUndefined
++ || ext.asym.sc == scSUndefined)
++ continue;
++
++ name = input_debug.ssext + ext.asym.iss;
++ h = sw64_elf_link_hash_lookup (htab, name, false, false, true);
++ if (h == NULL || h->esym.ifd != -2)
++ continue;
++
++ if (ext.ifd != -1)
++ {
++ BFD_ASSERT (ext.ifd
++ < input_debug.symbolic_header.ifdMax);
++ ext.ifd = input_debug.ifdmap[ext.ifd];
++ }
++
++ h->esym = ext;
++ }
++
++ /* Free up the information we just read. */
++ free (input_debug.line);
++ free (input_debug.external_dnr);
++ free (input_debug.external_pdr);
++ free (input_debug.external_sym);
++ free (input_debug.external_opt);
++ free (input_debug.external_aux);
++ free (input_debug.ss);
++ free (input_debug.ssext);
++ free (input_debug.external_fdr);
++ free (input_debug.external_rfd);
++ free (input_debug.external_ext);
++
++ /* Hack: reset the SEC_HAS_CONTENTS flag so that
++ elf_link_input_bfd ignores this section. */
++ input_section->flags &=~ SEC_HAS_CONTENTS;
++ }
++
++ /* Build the external symbol information. */
++ einfo.abfd = abfd;
++ einfo.info = info;
++ einfo.debug = &debug;
++ einfo.swap = swap;
++ einfo.failed = false;
++ elf_link_hash_traverse (elf_hash_table (info),
++ elf64_sw64_output_extsym,
++ &einfo);
++ if (einfo.failed)
++ return false;
++
++ /* Set the size of the .mdebug section. */
++ o->size = bfd_ecoff_debug_size (abfd, &debug, swap);
++
++ /* Skip this section later on (I don't think this currently
++ matters, but someday it might). */
++ o->map_head.link_order = (struct bfd_link_order *) NULL;
++
++ mdebug_sec = o;
++ }
++ }
++
++ /* Invoke the regular ELF backend linker to do all the work. */
++ if (! bfd_elf_final_link (abfd, info))
++ return false;
++
++ /* Now write out the computed sections. */
++
++ /* The .got subsections... */
++ {
++ bfd *i, *dynobj = elf_hash_table(info)->dynobj;
++ for (i = htab->got_list;
++ i != NULL;
++ i = sw64_elf_tdata(i)->got_link_next)
++ {
++ asection *sgot;
++
++ /* elf_bfd_final_link already did everything in dynobj. */
++ if (i == dynobj)
++ continue;
++
++ sgot = sw64_elf_tdata(i)->got;
++ if (! bfd_set_section_contents (abfd, sgot->output_section,
++ sgot->contents,
++ (file_ptr) sgot->output_offset,
++ sgot->size))
++ return false;
++ }
++ }
++
++ if (mdebug_sec != (asection *) NULL)
++ {
++ BFD_ASSERT (abfd->output_has_begun);
++ if (! bfd_ecoff_write_accumulated_debug (mdebug_handle, abfd, &debug,
++ swap, info,
++ mdebug_sec->filepos))
++ return false;
++
++ bfd_ecoff_debug_free (mdebug_handle, abfd, &debug, swap, info);
++ }
++
++ return true;
++}
++
++static enum elf_reloc_type_class
++elf64_sw64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
++ const asection *rel_sec ATTRIBUTE_UNUSED,
++ const Elf_Internal_Rela *rela)
++{
++ switch ((int) ELF64_R_TYPE (rela->r_info))
++ {
++ case R_SW64_RELATIVE:
++ return reloc_class_relative;
++ case R_SW64_JMP_SLOT:
++ return reloc_class_plt;
++ case R_SW64_COPY:
++ return reloc_class_copy;
++ default:
++ return reloc_class_normal;
++ }
++}
++
++static const struct bfd_elf_special_section elf64_sw64_special_sections[] =
++{
++ { STRING_COMMA_LEN (".sbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_SW64_GPREL },
++ { STRING_COMMA_LEN (".sdata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_SW64_GPREL },
++ { NULL, 0, 0, 0, 0 }
++};
++
++/* ECOFF swapping routines. These are used when dealing with the
++ .mdebug section, which is in the ECOFF debugging format. Copied
++ from elf32-mips.c. */
++static const struct ecoff_debug_swap
++elf64_sw64_ecoff_debug_swap =
++{
++ /* Symbol table magic number. */
++ magicSym2,
++ /* Alignment of debugging information. E.g., 4. */
++ 8,
++ /* Sizes of external symbolic information. */
++ sizeof (struct hdr_ext),
++ sizeof (struct dnr_ext),
++ sizeof (struct pdr_ext),
++ sizeof (struct sym_ext),
++ sizeof (struct opt_ext),
++ sizeof (struct fdr_ext),
++ sizeof (struct rfd_ext),
++ sizeof (struct ext_ext),
++ /* Functions to swap in external symbolic data. */
++ ecoff_swap_hdr_in,
++ ecoff_swap_dnr_in,
++ ecoff_swap_pdr_in,
++ ecoff_swap_sym_in,
++ ecoff_swap_opt_in,
++ ecoff_swap_fdr_in,
++ ecoff_swap_rfd_in,
++ ecoff_swap_ext_in,
++ _bfd_ecoff_swap_tir_in,
++ _bfd_ecoff_swap_rndx_in,
++ /* Functions to swap out external symbolic data. */
++ ecoff_swap_hdr_out,
++ ecoff_swap_dnr_out,
++ ecoff_swap_pdr_out,
++ ecoff_swap_sym_out,
++ ecoff_swap_opt_out,
++ ecoff_swap_fdr_out,
++ ecoff_swap_rfd_out,
++ ecoff_swap_ext_out,
++ _bfd_ecoff_swap_tir_out,
++ _bfd_ecoff_swap_rndx_out,
++ /* Function to read in symbolic data. */
++ elf64_sw64_read_ecoff_info
++};
++
++/* Use a non-standard hash bucket size of 8. */
++
++static const struct elf_size_info sw64_elf_size_info =
++{
++ sizeof (Elf64_External_Ehdr),
++ sizeof (Elf64_External_Phdr),
++ sizeof (Elf64_External_Shdr),
++ sizeof (Elf64_External_Rel),
++ sizeof (Elf64_External_Rela),
++ sizeof (Elf64_External_Sym),
++ sizeof (Elf64_External_Dyn),
++ sizeof (Elf_External_Note),
++ 8,
++ 1,
++ 64, 3,
++ ELFCLASS64, EV_CURRENT,
++ bfd_elf64_write_out_phdrs,
++ bfd_elf64_write_shdrs_and_ehdr,
++ bfd_elf64_checksum_contents,
++ bfd_elf64_write_relocs,
++ bfd_elf64_swap_symbol_in,
++ bfd_elf64_swap_symbol_out,
++ bfd_elf64_slurp_reloc_table,
++ bfd_elf64_slurp_symbol_table,
++ bfd_elf64_swap_dyn_in,
++ bfd_elf64_swap_dyn_out,
++ bfd_elf64_swap_reloc_in,
++ bfd_elf64_swap_reloc_out,
++ bfd_elf64_swap_reloca_in,
++ bfd_elf64_swap_reloca_out
++};
++
++#define TARGET_LITTLE_SYM sw64_elf64_vec
++#define TARGET_LITTLE_NAME "elf64-sw64"
++#define ELF_ARCH bfd_arch_sw64
++#define ELF_TARGET_ID SW64_ELF_DATA
++#define ELF_MACHINE_CODE EM_SW64
++#define ELF_MAXPAGESIZE 0x10000
++#define ELF_COMMONPAGESIZE 0x2000
++
++#define bfd_elf64_bfd_link_hash_table_create \
++ elf64_sw64_bfd_link_hash_table_create
++
++#define bfd_elf64_bfd_reloc_type_lookup \
++ elf64_sw64_bfd_reloc_type_lookup
++#define bfd_elf64_bfd_reloc_name_lookup \
++ elf64_sw64_bfd_reloc_name_lookup
++#define elf_info_to_howto \
++ elf64_sw64_info_to_howto
++
++#define bfd_elf64_mkobject \
++ elf64_sw64_mkobject
++#define elf_backend_object_p \
++ elf64_sw64_object_p
++
++#define elf_backend_section_from_shdr \
++ elf64_sw64_section_from_shdr
++#define elf_backend_section_flags \
++ elf64_sw64_section_flags
++#define elf_backend_fake_sections \
++ elf64_sw64_fake_sections
++
++#define bfd_elf64_bfd_is_local_label_name \
++ elf64_sw64_is_local_label_name
++#define bfd_elf64_find_nearest_line \
++ elf64_sw64_find_nearest_line
++#define bfd_elf64_bfd_relax_section \
++ elf64_sw64_relax_section
++
++#define elf_backend_add_symbol_hook \
++ elf64_sw64_add_symbol_hook
++#define elf_backend_relocs_compatible \
++ _bfd_elf_relocs_compatible
++#define elf_backend_sort_relocs_p \
++ elf64_sw64_sort_relocs_p
++#define elf_backend_check_relocs \
++ elf64_sw64_check_relocs
++#define elf_backend_create_dynamic_sections \
++ elf64_sw64_create_dynamic_sections
++#define elf_backend_adjust_dynamic_symbol \
++ elf64_sw64_adjust_dynamic_symbol
++#define elf_backend_merge_symbol_attribute \
++ elf64_sw64_merge_symbol_attribute
++#define elf_backend_copy_indirect_symbol \
++ elf64_sw64_copy_indirect_symbol
++#define elf_backend_always_size_sections \
++ elf64_sw64_always_size_sections
++#define elf_backend_size_dynamic_sections \
++ elf64_sw64_size_dynamic_sections
++#define elf_backend_omit_section_dynsym \
++ _bfd_elf_omit_section_dynsym_all
++#define elf_backend_relocate_section \
++ elf64_sw64_relocate_section
++#define elf_backend_finish_dynamic_symbol \
++ elf64_sw64_finish_dynamic_symbol
++#define elf_backend_finish_dynamic_sections \
++ elf64_sw64_finish_dynamic_sections
++#define bfd_elf64_bfd_final_link \
++ elf64_sw64_final_link
++#define elf_backend_reloc_type_class \
++ elf64_sw64_reloc_type_class
++
++#define elf_backend_can_gc_sections 1
++#define elf_backend_gc_mark_hook elf64_sw64_gc_mark_hook
++
++#define elf_backend_ecoff_debug_swap \
++ &elf64_sw64_ecoff_debug_swap
++
++#define elf_backend_size_info \
++ sw64_elf_size_info
++
++#define elf_backend_special_sections \
++ elf64_sw64_special_sections
++
++#define elf_backend_strip_zero_sized_dynamic_sections \
++ _bfd_elf_strip_zero_sized_dynamic_sections
++
++/* A few constants that determine how the .plt section is set up. */
++#define elf_backend_want_got_plt 0
++#define elf_backend_plt_readonly 0
++#define elf_backend_want_plt_sym 1
++#define elf_backend_got_header_size 0
++#define elf_backend_dtrel_excludes_plt 1
++
++#include "elf64-target.h"
++
++/* FreeBSD support. */
++
++#undef TARGET_LITTLE_SYM
++#define TARGET_LITTLE_SYM sw64_elf64_fbsd_vec
++#undef TARGET_LITTLE_NAME
++#define TARGET_LITTLE_NAME "elf64-sw64-freebsd"
++#undef ELF_OSABI
++#define ELF_OSABI ELFOSABI_FREEBSD
++
++/* The kernel recognizes executables as valid only if they carry a
++ "FreeBSD" label in the ELF header. So we put this label on all
++ executables and (for simplicity) also all other object files. */
++
++static bool
++elf64_sw64_fbsd_init_file_header (bfd *abfd, struct bfd_link_info *info)
++{
++ Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
++
++ if (!_bfd_elf_init_file_header (abfd, info))
++ return false;
++
++ i_ehdrp = elf_elfheader (abfd);
++
++ /* Put an ABI label supported by FreeBSD >= 4.1. */
++ i_ehdrp->e_ident[EI_OSABI] = get_elf_backend_data (abfd)->elf_osabi;
++#ifdef OLD_FREEBSD_ABI_LABEL
++ /* The ABI label supported by FreeBSD <= 4.0 is quite nonstandard. */
++ memcpy (&i_ehdrp->e_ident[EI_ABIVERSION], "FreeBSD", 8);
++#endif
++ return true;
++}
++
++#undef elf_backend_init_file_header
++#define elf_backend_init_file_header \
++ elf64_sw64_fbsd_init_file_header
++
++#undef elf64_bed
++#define elf64_bed elf64_sw64_fbsd_bed
++
++#include "elf64-target.h"
+diff -Naur gdb-14.1-after-patch/bfd/elf-bfd.h gdb-14.1-sw64/bfd/elf-bfd.h
+--- gdb-14.1-after-patch/bfd/elf-bfd.h 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/bfd/elf-bfd.h 2025-03-03 10:59:12.970000000 +0800
+@@ -533,6 +533,7 @@
+ {
+ AARCH64_ELF_DATA = 1,
+ ALPHA_ELF_DATA,
++ SW64_ELF_DATA,
+ AMDGCN_ELF_DATA,
+ ARC_ELF_DATA,
+ ARM_ELF_DATA,
+diff -Naur gdb-14.1-after-patch/bfd/libbfd.h gdb-14.1-sw64/bfd/libbfd.h
+--- gdb-14.1-after-patch/bfd/libbfd.h 2025-03-03 09:43:44.100000000 +0800
++++ gdb-14.1-sw64/bfd/libbfd.h 2025-03-03 10:59:13.020000000 +0800
+@@ -1237,6 +1237,35 @@
+ "BFD_RELOC_ALPHA_TPREL_HI16",
+ "BFD_RELOC_ALPHA_TPREL_LO16",
+ "BFD_RELOC_ALPHA_TPREL16",
++ "BFD_RELOC_SW64_GPDISP_HI16",
++ "BFD_RELOC_SW64_GPDISP_LO16",
++ "BFD_RELOC_SW64_GPDISP",
++ "BFD_RELOC_SW64_LITERAL",
++ "BFD_RELOC_SW64_ELF_LITERAL",
++ "BFD_RELOC_SW64_LITUSE",
++ "BFD_RELOC_SW64_HINT",
++ "BFD_RELOC_SW64_LINKAGE",
++ "BFD_RELOC_SW64_CODEADDR",
++ "BFD_RELOC_SW64_GPREL_HI16",
++ "BFD_RELOC_SW64_GPREL_LO16",
++ "BFD_RELOC_SW64_BRSGP",
++ "BFD_RELOC_SW64_NOP",
++ "BFD_RELOC_SW64_BSR",
++ "BFD_RELOC_SW64_LDA",
++ "BFD_RELOC_SW64_BOH",
++ "BFD_RELOC_SW64_TLSGD",
++ "BFD_RELOC_SW64_TLSLDM",
++ "BFD_RELOC_SW64_DTPMOD64",
++ "BFD_RELOC_SW64_GOTDTPREL16",
++ "BFD_RELOC_SW64_DTPREL64",
++ "BFD_RELOC_SW64_DTPREL_HI16",
++ "BFD_RELOC_SW64_DTPREL_LO16",
++ "BFD_RELOC_SW64_DTPREL16",
++ "BFD_RELOC_SW64_GOTTPREL16",
++ "BFD_RELOC_SW64_TPREL64",
++ "BFD_RELOC_SW64_TPREL_HI16",
++ "BFD_RELOC_SW64_TPREL_LO16",
++ "BFD_RELOC_SW64_TPREL16",
+ "BFD_RELOC_MIPS_JMP",
+ "BFD_RELOC_MICROMIPS_JMP",
+ "BFD_RELOC_MIPS16_JMP",
+diff -Naur gdb-14.1-after-patch/bfd/Makefile.am gdb-14.1-sw64/bfd/Makefile.am
+--- gdb-14.1-after-patch/bfd/Makefile.am 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/bfd/Makefile.am 2025-03-03 10:59:12.950000000 +0800
+@@ -96,6 +96,7 @@
+ # with the decls and initializer in archures.c.
+ ALL_MACHINES = \
+ cpu-aarch64.lo \
++ cpu-sw64.lo \
+ cpu-alpha.lo \
+ cpu-amdgcn.lo \
+ cpu-arc.lo \
+@@ -180,6 +181,7 @@
+
+ ALL_MACHINES_CFILES = \
+ cpu-aarch64.c \
++ cpu-sw64.c \
+ cpu-alpha.c \
+ cpu-amdgcn.c \
+ cpu-arc.c \
+@@ -552,6 +554,7 @@
+ elf32-riscv.lo \
+ elf32-score.lo \
+ elf32-score7.lo \
++ elf64-sw64.lo \
+ elf64-aarch64.lo \
+ elf64-alpha.lo \
+ elf64-amdgcn.lo \
+@@ -686,7 +689,7 @@
+ $(OPTIONAL_BACKENDS_CFILES)
+
+ BUILD_CFILES = \
+- elf32-aarch64.c elf64-aarch64.c \
++ elf32-aarch64.c elf64-aarch64.c elf64-sw64.c \
+ elf32-kvx.c elf64-kvx.c \
+ elf32-ia64.c elf64-ia64.c \
+ elf32-loongarch.c elf64-loongarch.c \
+@@ -700,7 +703,7 @@
+ SOURCE_HFILES = \
+ aout-target.h aoutx.h arc-got.h arc-plt.h \
+ coff-arm.h coff-bfd.h coffcode.h coffswap.h \
+- cpu-aarch64.h cpu-arm.h cpu-h8300.h cpu-m68k.h cpu-riscv.h \
++ cpu-aarch64.h cpu-arm.h cpu-h8300.h cpu-m68k.h cpu-riscv.h cpu-sw64.h \
+ ecoff-bfd.h ecoffswap.h \
+ elf32-arm.h elf32-avr.h elf32-bfin.h elf32-cr16.h elf32-csky.h \
+ elf32-dlx.h elf32-hppa.h elf32-m68hc1x.h elf32-m68k.h \
+diff -Naur gdb-14.1-after-patch/bfd/Makefile.in gdb-14.1-sw64/bfd/Makefile.in
+--- gdb-14.1-after-patch/bfd/Makefile.in 2023-12-03 13:23:54.000000000 +0800
++++ gdb-14.1-sw64/bfd/Makefile.in 2025-03-03 10:59:12.950000000 +0800
+@@ -551,6 +551,7 @@
+ # with the decls and initializer in archures.c.
+ ALL_MACHINES = \
+ cpu-aarch64.lo \
++ cpu-sw64.lo \
+ cpu-alpha.lo \
+ cpu-amdgcn.lo \
+ cpu-arc.lo \
+@@ -635,6 +636,7 @@
+
+ ALL_MACHINES_CFILES = \
+ cpu-aarch64.c \
++ cpu-sw64.c \
+ cpu-alpha.c \
+ cpu-amdgcn.c \
+ cpu-arc.c \
+@@ -998,6 +1000,7 @@
+ BFD64_BACKENDS = \
+ aix5ppc-core.lo \
+ aout64.lo \
++ coff-sw64.lo \
+ coff-alpha.lo \
+ coff-x86_64.lo \
+ coff64-rs6000.lo \
+@@ -1010,6 +1013,7 @@
+ elf32-score.lo \
+ elf32-score7.lo \
+ elf64-aarch64.lo \
++ elf64-sw64.lo \
+ elf64-alpha.lo \
+ elf64-amdgcn.lo \
+ elf64-bpf.lo \
+@@ -1055,6 +1059,7 @@
+ BFD64_BACKENDS_CFILES = \
+ aix5ppc-core.c \
+ aout64.c \
++ coff-sw64.c \
+ coff-alpha.c \
+ coff-x86_64.c \
+ coff64-rs6000.c \
+@@ -1142,7 +1147,7 @@
+ $(OPTIONAL_BACKENDS_CFILES)
+
+ BUILD_CFILES = \
+- elf32-aarch64.c elf64-aarch64.c \
++ elf32-aarch64.c elf64-aarch64.c elf64-sw64.c \
+ elf32-kvx.c elf64-kvx.c \
+ elf32-ia64.c elf64-ia64.c \
+ elf32-loongarch.c elf64-loongarch.c \
+@@ -1153,7 +1158,7 @@
+ SOURCE_HFILES = \
+ aout-target.h aoutx.h arc-got.h arc-plt.h \
+ coff-arm.h coff-bfd.h coffcode.h coffswap.h \
+- cpu-aarch64.h cpu-arm.h cpu-h8300.h cpu-m68k.h cpu-riscv.h \
++ cpu-aarch64.h cpu-arm.h cpu-h8300.h cpu-m68k.h cpu-riscv.h cpu-sw64.h \
+ ecoff-bfd.h ecoffswap.h \
+ elf32-arm.h elf32-avr.h elf32-bfin.h elf32-cr16.h elf32-csky.h \
+ elf32-dlx.h elf32-hppa.h elf32-m68hc1x.h elf32-m68k.h \
+@@ -1445,6 +1450,7 @@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cache.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cf-i386lynx.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cisco-core.Plo@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/coff-sw64.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/coff-alpha.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/coff-bfd.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/coff-go32.Plo@am__quote@
+@@ -1465,6 +1471,7 @@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/compress.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/corefile.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-aarch64.Plo@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-sw64.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-alpha.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-amdgcn.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu-arc.Plo@am__quote@
+@@ -1631,6 +1638,7 @@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf32-z80.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf32.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-aarch64.Plo@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-sw64.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-alpha.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-amdgcn.Plo@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/elf64-bpf.Plo@am__quote@
+diff -Naur gdb-14.1-after-patch/bfd/targets.c gdb-14.1-sw64/bfd/targets.c
+--- gdb-14.1-after-patch/bfd/targets.c 2023-10-08 15:51:14.000000000 +0800
++++ gdb-14.1-sw64/bfd/targets.c 2025-03-03 10:59:13.020000000 +0800
+@@ -685,6 +685,8 @@
+ extern const bfd_target aarch64_mach_o_vec;
+ extern const bfd_target aarch64_pei_le_vec;
+ extern const bfd_target aarch64_pe_le_vec;
++extern const bfd_target sw64_ecoff_le_vec;
++extern const bfd_target sw64_elf64_vec;
+ extern const bfd_target alpha_ecoff_le_vec;
+ extern const bfd_target alpha_elf64_vec;
+ extern const bfd_target alpha_elf64_fbsd_vec;
+@@ -1006,6 +1008,8 @@
+ #endif
+
+ #ifdef BFD64
++ &sw64_ecoff_le_vec,
++ &sw64_elf64_vec,
+ &alpha_ecoff_le_vec,
+ &alpha_elf64_vec,
+ &alpha_elf64_fbsd_vec,
diff --git a/gdb.spec b/gdb.spec
index 2c1f382e93a90e3c10034456d29d4f4e6c7fa080..0388a1d1610c4e96b0eb37945beec0018c42025a 100644
--- a/gdb.spec
+++ b/gdb.spec
@@ -1,6 +1,6 @@
Name: gdb
Version: 14.1
-Release: 11
+Release: 12
License: GPLv3+ and GPLv3+ with exceptions and GPLv2+ and GPLv2+ with exceptions and GPL+ and LGPLv2+ and LGPLv3+ and BSD and Public Domain and GFDL-1.3
Source: https://ftp.gnu.org/gnu/gdb/gdb-%{version}.tar.xz
@@ -66,6 +66,12 @@ Patch52: backport-CVE-2025-11083.patch
Patch53: backport-CVE-2025-11412.patch
Patch54: backport-CVE-2025-11840.patch
+Patch55: gdb-14.1-add-support-for-SW64-001.patch
+Patch56: gdb-14.1-add-support-for-SW64-002.patch
+Patch57: gdb-14.1-add-support-for-SW64-003.patch
+Patch58: gdb-14.1-add-support-for-SW64-004.patch
+Patch59: gdb-14.1-add-support-for-SW64-005.patch
+Patch60: gdb-14.1-add-support-for-SW64-006.patch
Patch9000: 0001-set-entry-point-when-text-segment-is-missing.patch
%global gdb_src gdb-%{version}
@@ -202,7 +208,7 @@ export CXXFLAGS="$CFLAGS"
--with-lzma \
--without-libunwind \
--enable-64-bit-bfd \
-%ifnarch riscv64 loongarch64
+%ifnarch riscv64 loongarch64 sw_64
--enable-inprocess-agent \
%endif
--with-system-zlib \
@@ -324,7 +330,7 @@ rm -f $RPM_BUILD_ROOT%{_datadir}/gdb/python/gdb/command/backtrace.py
%files gdbserver
%{_bindir}/gdbserver
-%ifnarch riscv64 loongarch64
+%ifnarch riscv64 loongarch64 sw_64
%{_libdir}/libinproctrace.so
%endif
@@ -342,6 +348,9 @@ rm -f $RPM_BUILD_ROOT%{_datadir}/gdb/python/gdb/command/backtrace.py
%{_infodir}/ctf-spec.info*
%changelog
+* Wed Oct 15 2025 Zhu lianghua - 14.1-12
+- Add support for SW64
+
* Tue Nov 04 2025 wangxiao - 14.1-11
- fix CVE-2025-11840