diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig index 64d77f9dea3916eb2dca1d2aaa62cacc6213467a..4f5f012706871482acf2452d11ffbfffeb77245b 100644 --- a/arch/sw_64/Kconfig +++ b/arch/sw_64/Kconfig @@ -48,6 +48,7 @@ config SW64 select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS + select ARCH_WANT_DEFAULT_BPF_JIT select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_IPC_PARSE_VERSION select AUDIT_ARCH @@ -97,6 +98,7 @@ config SW64 select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE if STACKTRACE + select HAVE_RSEQ select HAVE_SYSCALL_TRACEPOINTS select IRQ_FORCED_THREADING select MEMORY_HOTPLUG_SPARSE if MEMORY_HOTPLUG @@ -108,6 +110,7 @@ config SW64 select SET_FS select SPARSEMEM_EXTREME if SPARSEMEM select SWIOTLB + select THREAD_INFO_IN_TASK config LOCKDEP_SUPPORT def_bool y @@ -228,16 +231,16 @@ config SW64_ASIC endchoice config SW64_CHIP3_ASIC_DEBUG - bool "Debug Support for Chip3 Asic" - depends on SW64_ASIC - help - Used for debug + bool "Debug Support for Chip3 Asic" + depends on SW64_ASIC + help + Used for debug config CPUFREQ_DEBUGFS - bool "CPU Frequency debugfs interface for Chip3 Asic" - depends on SW64_CHIP3 && DEBUG_FS - help - Turns on the DebugFS interface for CPU Frequency. + bool "CPU Frequency debugfs interface for Chip3 Asic" + depends on SW64_CHIP3 && DEBUG_FS + help + Turns on the DebugFS interface for CPU Frequency. If you don't know what to do here, say N. @@ -255,13 +258,6 @@ config PLATFORM_XUELANG endchoice -config LEGACY_XUELANG - bool "Xuelang Reset Interface" - depends on SW64_CHIP3 - help - This enables the legacy reset driver for SW64 chip3 CRBs. This interface - as a temporary solution will be deprecated in the future. - config MIGHT_HAVE_PC_SERIO bool "Use PC serio device i8042" select ARCH_MIGHT_HAVE_PC_SERIO @@ -273,251 +269,14 @@ config LOCK_MEMB bool "Insert mem barrier before lock instruction" default y -menu "CPU Frequency scaling" - -config CPU_FREQ - bool "CPU Frequency scaling" - select SRCU - help - CPU Frequency scaling allows you to change the clock speed of - CPUs on the fly. This is a nice method to save power, because - the lower the CPU clock speed, the less power the CPU consumes. - - Note that this driver doesn't automatically change the CPU - clock speed, you need to either enable a dynamic cpufreq governor - (see below) after boot, or use a userspace tool. - - For details, take a look at . - - If in doubt, say N. - -if CPU_FREQ - -config SW64_CPUFREQ - bool "sw64 CPU Frequency interface for Chip3 Asic" - depends on SW64_CHIP3 - default y - help - Turns on the interface for SW64_CPU Frequency. +source "drivers/cpufreq/Kconfig" config SW64_CPUAUTOPLUG bool "sw64 CPU Autoplug interface" depends on SW64_CPUFREQ default y help - Turns on the interface for SW64_CPU CPUAUTOPLUG. - -config CPU_FREQ_GOV_ATTR_SET - bool - -config CPU_FREQ_GOV_COMMON - select CPU_FREQ_GOV_ATTR_SET - select IRQ_WORK - bool - -config CPU_FREQ_BOOST_SW - bool - depends on THERMAL - -config CPU_FREQ_STAT - bool "CPU frequency transition statistics" - help - Export CPU frequency statistics information through sysfs. - - If in doubt, say N. - -choice - prompt "Default CPUFreq governor" - default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ - default CPU_FREQ_DEFAULT_GOV_PERFORMANCE - help - This option sets which CPUFreq governor shall be loaded at - startup. If in doubt, select 'performance'. 
- -config CPU_FREQ_DEFAULT_GOV_PERFORMANCE - bool "performance" - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'performance' as default. This sets - the frequency statically to the highest frequency supported by - the CPU. - -config CPU_FREQ_DEFAULT_GOV_POWERSAVE - bool "powersave" - select CPU_FREQ_GOV_POWERSAVE - help - Use the CPUFreq governor 'powersave' as default. This sets - the frequency statically to the lowest frequency supported by - the CPU. - -config CPU_FREQ_DEFAULT_GOV_USERSPACE - bool "userspace" - select CPU_FREQ_GOV_USERSPACE - help - Use the CPUFreq governor 'userspace' as default. This allows - you to set the CPU frequency manually or when a userspace - program shall be able to set the CPU dynamically without having - to enable the userspace governor manually. - -config CPU_FREQ_DEFAULT_GOV_ONDEMAND - bool "ondemand" - select CPU_FREQ_GOV_ONDEMAND - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'ondemand' as default. This allows - you to get a full dynamic frequency capable system by simply - loading your cpufreq low-level hardware driver. - Be aware that not all cpufreq drivers support the ondemand - governor. If unsure have a look at the help section of the - driver. Fallback governor will be the performance governor. - -config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE - bool "conservative" - select CPU_FREQ_GOV_CONSERVATIVE - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'conservative' as default. This allows - you to get a full dynamic frequency capable system by simply - loading your cpufreq low-level hardware driver. - Be aware that not all cpufreq drivers support the conservative - governor. If unsure have a look at the help section of the - driver. Fallback governor will be the performance governor. - -config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL - bool "schedutil" - depends on SMP - select CPU_FREQ_GOV_SCHEDUTIL - select CPU_FREQ_GOV_PERFORMANCE - help - Use the 'schedutil' CPUFreq governor by default. If unsure, - have a look at the help section of that governor. The fallback - governor will be 'performance'. - -endchoice - -config CPU_FREQ_GOV_PERFORMANCE - tristate "'performance' governor" - help - This cpufreq governor sets the frequency statically to the - highest available CPU frequency. - - To compile this driver as a module, choose M here: the - module will be called cpufreq_performance. - - If in doubt, say Y. - -config CPU_FREQ_GOV_POWERSAVE - tristate "'powersave' governor" - help - This cpufreq governor sets the frequency statically to the - lowest available CPU frequency. - - To compile this driver as a module, choose M here: the - module will be called cpufreq_powersave. - - If in doubt, say Y. - -config CPU_FREQ_GOV_USERSPACE - tristate "'userspace' governor for userspace frequency scaling" - help - Enable this cpufreq governor when you either want to set the - CPU frequency manually or when a userspace program shall - be able to set the CPU dynamically, like on LART - . - - To compile this driver as a module, choose M here: the - module will be called cpufreq_userspace. - - For details, take a look at . - - If in doubt, say Y. - -config CPU_FREQ_GOV_ONDEMAND - tristate "'ondemand' cpufreq policy governor" - select CPU_FREQ_GOV_COMMON - help - 'ondemand' - This driver adds a dynamic cpufreq policy governor. - The governor does a periodic polling and - changes frequency based on the CPU utilization. 
- The support for this governor depends on CPU capability to - do fast frequency switching (i.e, very low latency frequency - transitions). - - To compile this driver as a module, choose M here: the - module will be called cpufreq_ondemand. - - For details, take a look at linux/Documentation/cpu-freq. - - If in doubt, say N. - -config CPU_FREQ_GOV_CONSERVATIVE - tristate "'conservative' cpufreq governor" - depends on CPU_FREQ - select CPU_FREQ_GOV_COMMON - help - 'conservative' - this driver is rather similar to the 'ondemand' - governor both in its source code and its purpose, the difference is - its optimisation for better suitability in a battery powered - environment. The frequency is gracefully increased and decreased - rather than jumping to 100% when speed is required. - - If you have a desktop machine then you should really be considering - the 'ondemand' governor instead, however if you are using a laptop, - PDA or even an AMD64 based computer (due to the unacceptable - step-by-step latency issues between the minimum and maximum frequency - transitions in the CPU) you will probably want to use this governor. - - To compile this driver as a module, choose M here: the - module will be called cpufreq_conservative. - - For details, take a look at linux/Documentation/cpu-freq. - - If in doubt, say N. - -config CPU_FREQ_GOV_SCHEDUTIL - bool "'schedutil' cpufreq policy governor" - depends on CPU_FREQ && SMP - select CPU_FREQ_GOV_ATTR_SET - select IRQ_WORK - help - This governor makes decisions based on the utilization data provided - by the scheduler. It sets the CPU frequency to be proportional to - the utilization/capacity ratio coming from the scheduler. If the - utilization is frequency-invariant, the new frequency is also - proportional to the maximum available frequency. If that is not the - case, it is proportional to the current frequency of the CPU. The - frequency tipping point is at utilization/capacity equal to 80% in - both cases. - - If in doubt, say N. - -comment "CPU frequency scaling drivers" - -config CPUFREQ_DT - tristate "Generic DT based cpufreq driver" - depends on HAVE_CLK && OF - # if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y: - depends on !CPU_THERMAL || THERMAL - select CPUFREQ_DT_PLATDEV - select PM_OPP - help - This adds a generic DT based cpufreq driver for frequency management. - It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) - systems. - - If in doubt, say N. - -config CPUFREQ_DT_PLATDEV - bool - help - This adds a generic DT based cpufreq platdev driver for frequency - management. This creates a 'cpufreq-dt' platform device, on the - supported platforms. - - If in doubt, say N. - -endif -endmenu + Turns on the interface for SW64_CPU CPUAUTOPLUG. # clear all implied options (don't want default values for those): # Most of these machines have ISA slots; not exactly sure which don't, @@ -526,11 +285,11 @@ config ISA bool default y help - Find out whether you have ISA slots on your motherboard. ISA is the + Find out whether you have ISA slots on your motherboard. ISA is the name of a bus system, i.e. the way the CPU talks to the other stuff - inside your box. Other bus systems are PCI, EISA, MicroChannel - (MCA) or VESA. ISA is an older system, now being displaced by PCI; - newer boards don't support it. If you have ISA, say Y, otherwise N. + inside your box. Other bus systems are PCI, EISA, MicroChannel + (MCA) or VESA. ISA is an older system, now being displaced by PCI; + newer boards don't support it. 
If you have ISA, say Y, otherwise N. config ISA_DMA_API bool @@ -560,15 +319,15 @@ config KEXEC select KEXEC_CORE help kexec is a system call that implements the ability to shutdown your - current kernel, and to start another kernel. It is like a reboot - but it is independent of the system firmware. And like a reboot + current kernel, and to start another kernel. It is like a reboot + but it is independent of the system firmware. And like a reboot you can start any kernel with it, not just Linux. The name comes from the similarity to the exec system call. It is an ongoing process to be certain the hardware in a machine is properly shutdown, so do not be surprised if this code does not - initially work for you. As of this writing the exact hardware + initially work for you. As of this writing the exact hardware interface is strongly in flux, so no good recommendation can be made. @@ -631,7 +390,7 @@ config SMP If you don't know what to do here, say N. config ARCH_PROC_KCORE_TEXT - def_bool y + def_bool y config HAVE_DEC_LOCK bool "Use arch-specified dec_and_lock" @@ -674,8 +433,8 @@ config NUMA depends on SMP && !FLATMEM help Say Y to compile the kernel to support NUMA (Non-Uniform Memory - Access). This option is for configuring high-end multiprocessor - server machines. If in doubt, say N. + Access). This option is for configuring high-end multiprocessor + server machines. If in doubt, say N. config USE_PERCPU_NUMA_NODE_ID def_bool y @@ -808,7 +567,7 @@ config CMDLINE_BOOL bool "Built-in kernel command line" help Allow for specifying boot arguments to the kernel at - build time. On some systems (e.g. embedded ones), it is + build time. On some systems (e.g. embedded ones), it is necessary or convenient to provide some or all of the kernel boot arguments with the kernel itself (that is, to not rely on the boot loader to provide them.) @@ -826,7 +585,7 @@ config CMDLINE default "" help Enter arguments here that should be compiled into the kernel - image and used at boot time. If the boot loader provides a + image and used at boot time. If the boot loader provides a command line at boot time, it is appended to this string to form the full kernel command line, when the system boots. @@ -844,7 +603,7 @@ config CMDLINE_OVERRIDE Set this option to 'Y' to have the kernel ignore the boot loader command line, and use ONLY the built-in command line. - This is used to work around broken boot loaders. This should + This is used to work around broken boot loaders. This should be set to 'N' under normal conditions. config FORCE_MAX_ZONEORDER @@ -854,8 +613,8 @@ config FORCE_MAX_ZONEORDER help The kernel memory allocator divides physically contiguous memory blocks into "zones", where each zone is a power of two number of - pages. This option selects the largest power of two that the kernel - keeps in the memory allocator. If you need to allocate very large + pages. This option selects the largest power of two that the kernel + keeps in the memory allocator. If you need to allocate very large blocks of physically contiguous memory, then you may need to increase this value. 
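
The FORCE_MAX_ZONEORDER help text above comes down to simple arithmetic: the buddy allocator hands out blocks of 2^order pages, and on kernels of this generation the largest order it serves is MAX_ORDER - 1, so the biggest physically contiguous allocation is PAGE_SIZE << (MAX_ORDER - 1). The sketch below is illustration only; the 8 KiB page size and the order values are assumptions chosen for the example, not the SW64 defaults set by this patch.

/*
 * Illustration only: relates FORCE_MAX_ZONEORDER (MAX_ORDER) to the
 * largest physically contiguous block the buddy allocator can return.
 * Assumes the pre-6.4 convention that orders 0..MAX_ORDER-1 are served,
 * i.e. the largest block is PAGE_SIZE << (MAX_ORDER - 1).
 */
#include <stdio.h>

static unsigned long max_block_bytes(unsigned long page_size, unsigned int max_order)
{
	/* Largest single allocation the buddy system can satisfy. */
	return page_size << (max_order - 1);
}

int main(void)
{
	const unsigned long page_size = 8192;		/* assumed 8 KiB pages */
	const unsigned int orders[] = { 11, 13, 16 };	/* hypothetical settings */

	for (unsigned int i = 0; i < sizeof(orders) / sizeof(orders[0]); i++)
		printf("FORCE_MAX_ZONEORDER=%u -> largest contiguous block: %lu MiB\n",
		       orders[i], max_block_bytes(page_size, orders[i]) >> 20);
	return 0;
}

With these assumed values, order 11 yields 8 MiB blocks and order 16 yields 256 MiB blocks, which is why the help text suggests raising the option only when very large contiguous allocations are actually needed.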
diff --git a/arch/sw_64/chip/chip3/chip.c b/arch/sw_64/chip/chip3/chip.c index d0b1c1c1c6df7ac3a95c1a78832477aee58e2c11..819b9c3728f643323f47ccad536e5768e2d3f2c5 100644 --- a/arch/sw_64/chip/chip3/chip.c +++ b/arch/sw_64/chip/chip3/chip.c @@ -124,6 +124,18 @@ static int chip3_get_cpu_nums(void) return cpus; } +static void chip3_get_vt_smp_info(void) +{ + unsigned long smp_info; + + smp_info = sw64_io_read(0, SMP_INFO); + if (smp_info == -1UL) + smp_info = 0; + topo_nr_threads = (smp_info >> VT_THREADS_SHIFT) & VT_THREADS_MASK; + topo_nr_cores = (smp_info >> VT_CORES_SHIFT) & VT_CORES_MASK; + topo_nr_maxcpus = (smp_info >> VT_MAX_CPUS_SHIFT) & VT_MAX_CPUS_MASK; +} + static unsigned long chip3_get_vt_node_mem(int nodeid) { return *(unsigned long *)MMSIZE & MMSIZE_MASK; @@ -528,6 +540,18 @@ static void chip3_pcie_save(void) piu_save->msiconfig[i] = read_piu_ior0(node, index, MSICONFIG0 + (i << 7)); } + + piu_save->iommuexcpt_ctrl = read_piu_ior0(node, index, IOMMUEXCPT_CTRL); + piu_save->dtbaseaddr = read_piu_ior0(node, index, DTBASEADDR); + + piu_save->intaconfig = read_piu_ior0(node, index, INTACONFIG); + piu_save->intbconfig = read_piu_ior0(node, index, INTBCONFIG); + piu_save->intcconfig = read_piu_ior0(node, index, INTCCONFIG); + piu_save->intdconfig = read_piu_ior0(node, index, INTDCONFIG); + piu_save->pmeintconfig = read_piu_ior0(node, index, PMEINTCONFIG); + piu_save->aererrintconfig = read_piu_ior0(node, index, AERERRINTCONFIG); + piu_save->hpintconfig = read_piu_ior0(node, index, HPINTCONFIG); + } } @@ -555,6 +579,17 @@ static void chip3_pcie_restore(void) piu_save->msiconfig[i]); } + write_piu_ior0(node, index, IOMMUEXCPT_CTRL, piu_save->iommuexcpt_ctrl); + write_piu_ior0(node, index, DTBASEADDR, piu_save->dtbaseaddr); + + write_piu_ior0(node, index, INTACONFIG, piu_save->intaconfig); + write_piu_ior0(node, index, INTBCONFIG, piu_save->intbconfig); + write_piu_ior0(node, index, INTCCONFIG, piu_save->intcconfig); + write_piu_ior0(node, index, INTDCONFIG, piu_save->intdconfig); + write_piu_ior0(node, index, PMEINTCONFIG, piu_save->pmeintconfig); + write_piu_ior0(node, index, AERERRINTCONFIG, piu_save->aererrintconfig); + write_piu_ior0(node, index, HPINTCONFIG, piu_save->hpintconfig); + /* Enable DBI_RO_WR_EN */ rc_misc_ctrl = read_rc_conf(node, index, RC_MISC_CONTROL_1); write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl | 0x1); @@ -604,41 +639,6 @@ static inline void chip3_spbu_restore(void) sw64_io_write(0, MCU_DVC_INT_EN, saved_dvc_int); } -#define BIOS_SECBIN 0x2F00000UL -#define BIOS_SECSIZE 0x40000UL -#define BOUNCE_BUFFER ((1UL<<32) - BIOS_SECSIZE) -#define BIOS_MEMSAVE ((1UL<<32) - 2 * BIOS_SECSIZE) - -/* - * Due to specific architecture PCI MEM32 addressing, we reserve 512M memory - * size at PCI_32BIT_MEMIO (0xE000_0000) on SW64 platform. - * - * Since this memory region is still usable by OS, we implement a interface - * contract between BIOS and kernel: - * - * Firstly BIOS should back up SEC relative code segment to BIOS_MEMSAVE region - * with the length BIOS_SECSIZE in order to restore BIOS SEC phase binary during - * S3 sleep. - * - * Secondly kernel should use a bounce buffer to save memory region which may be - * overwritten by BIOS on resume from S3 sleep. 
- */ -static void chip3_mem_restore(void) -{ - void *dst, *src; - unsigned long size = BIOS_SECSIZE; - - /* Firstly kernel back up to a bounce buffer */ - src = __va(BIOS_SECBIN); - dst = __va(BOUNCE_BUFFER); - memcpy(dst, src, size); - - /* Secondly restore BIOS SEC phase binary */ - src = __va(BIOS_MEMSAVE); - dst = __va(BIOS_SECBIN); - memcpy(dst, src, size); -} - extern void cpld_write(uint8_t slave_addr, uint8_t reg, uint8_t data); static void chip3_suspend(bool wakeup) @@ -655,7 +655,6 @@ static void chip3_suspend(bool wakeup) chip3_spbu_save(); chip3_intpu_save(); chip3_pcie_save(); - chip3_mem_restore(); } } @@ -712,6 +711,7 @@ static void chip3_init_ops_fixup(void) if (is_guest_or_emul()) { sw64_chip_init->early_init.setup_core_start = chip3_setup_vt_core_start; sw64_chip_init->early_init.get_node_mem = chip3_get_vt_node_mem; + sw64_chip_init->early_init.get_smp_info = chip3_get_vt_smp_info; sw64_chip_init->pci_init.check_pci_linkup = chip3_check_pci_vt_linkup; } }; @@ -826,6 +826,7 @@ asmlinkage void do_entInt(unsigned long type, unsigned long vector, unsigned long irq_arg, struct pt_regs *regs) { struct pt_regs *old_regs; + extern char __idle_start[], __idle_end[]; if (is_guest_or_emul()) { if ((type & 0xffff) > 15) { @@ -837,6 +838,10 @@ asmlinkage void do_entInt(unsigned long type, unsigned long vector, } } + /* restart idle routine if it is interrupted */ + if (regs->pc > (u64)__idle_start && regs->pc < (u64)__idle_end) + regs->pc = (u64)__idle_start; + switch (type & 0xffff) { case INT_MSI: old_regs = set_irq_regs(regs); diff --git a/arch/sw_64/chip/chip3/msi.c b/arch/sw_64/chip/chip3/msi.c index 43688c96ccabeda7f5152da9f82d0c87f3d47abf..a0ab4de8fa294e4f548f2feabafd3eca2a228bff 100644 --- a/arch/sw_64/chip/chip3/msi.c +++ b/arch/sw_64/chip/chip3/msi.c @@ -5,6 +5,7 @@ #include #include +#include static struct irq_domain *msi_default_domain; static DEFINE_RAW_SPINLOCK(vector_lock); @@ -12,19 +13,6 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = { [0 ... 
PERCPU_MSI_IRQS - 1] = 0, }; -struct sw64_msi_chip_data { - spinlock_t cdata_lock; - unsigned long msi_config; - unsigned long rc_node; - unsigned long rc_index; - unsigned int msi_config_index; - unsigned int dst_coreid; - unsigned int vector; - unsigned int prev_coreid; - unsigned int prev_vector; - bool move_in_progress; -}; - static struct sw64_msi_chip_data *alloc_sw_msi_chip_data(struct irq_data *irq_data) { struct sw64_msi_chip_data *data; @@ -48,52 +36,59 @@ static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) msg->data = chip_data->msi_config_index; } -static bool find_free_core_vector(const struct cpumask *search_mask, int *found_coreid, int *found_vector) +bool find_free_cpu_vector(const struct cpumask *search_mask, + int *found_cpu, int *found_vector) { - int vector, coreid; - bool found = false, find_once_global = false; + int vector, max_vector, cpu; + bool find_once_global = false; - coreid = cpumask_first(search_mask); + cpu = cpumask_first(search_mask); try_again: - for (vector = 0; vector < 256; vector++) { - while (per_cpu(vector_irq, coreid)[vector]) { - coreid = cpumask_next(coreid, search_mask); - if (coreid >= nr_cpu_ids) { + if (is_guest_or_emul()) { + vector = IRQ_PENDING_MSI_VECTORS_SHIFT; + max_vector = SWVM_IRQS; + } else { + vector = 0; + max_vector = 256; + } + for (; vector < max_vector; vector++) { + while (per_cpu(vector_irq, cpu)[vector]) { + cpu = cpumask_next(cpu, search_mask); + if (cpu >= nr_cpu_ids) { if (vector == 255) { if (find_once_global) { printk("No global free vector\n"); - return found; + return false; } printk("No local free vector\n"); search_mask = cpu_online_mask; - coreid = cpumask_first(search_mask); + cpu = cpumask_first(search_mask); find_once_global = true; goto try_again; } - coreid = cpumask_first(search_mask); + cpu = cpumask_first(search_mask); break; } } - if (!per_cpu(vector_irq, coreid)[vector]) + if (!per_cpu(vector_irq, cpu)[vector]) break; } - found = true; - *found_coreid = coreid; + *found_cpu = cpu; *found_vector = vector; - return found; + return true; } -static unsigned long set_piu_msi_config(struct pci_controller *hose, int found_coreid, - int msiconf_index, int found_vector) +static unsigned long set_piu_msi_config(struct pci_controller *hose, int cpu, + int msiconf_index, int vector) { unsigned int reg; unsigned long msi_config; - int phy_coreid; + int phy_cpu; - msi_config = (1UL << 62) | ((unsigned long)found_vector << 10); - phy_coreid = cpu_to_rcid(found_coreid); - msi_config |= ((phy_coreid >> 5) << 6) | (phy_coreid & 0x1f); + msi_config = (1UL << 62) | ((unsigned long)vector << 10); + phy_cpu = cpu_to_rcid(cpu); + msi_config |= ((phy_cpu >> 5) << 6) | (phy_cpu & 0x1f); reg = MSICONFIG0 + (unsigned long)(msiconf_index << 7); write_piu_ior0(hose->node, hose->index, reg, msi_config); msi_config = read_piu_ior0(hose->node, hose->index, reg); @@ -110,7 +105,7 @@ static int sw64_set_affinity(struct irq_data *d, const struct cpumask *cpumask, struct msi_desc *entry; struct cpumask searchmask; unsigned long flags, msi_config; - int found_vector, found_coreid; + int vector, cpu; /* Is this valid ? 
*/ if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) @@ -125,14 +120,17 @@ static int sw64_set_affinity(struct irq_data *d, const struct cpumask *cpumask, if (!cdata) return -ENOMEM; - /* If existing target coreid is already in the new mask, and is online then do nothing.*/ - if (cpu_online(cdata->dst_coreid) && cpumask_test_cpu(cdata->dst_coreid, cpumask)) + /* + * If existing target cpu is already in the new mask and is online + * then do nothing. + */ + if (cpu_online(cdata->dst_cpu) && cpumask_test_cpu(cdata->dst_cpu, cpumask)) return IRQ_SET_MASK_OK; raw_spin_lock_irqsave(&vector_lock, flags); cpumask_and(&searchmask, cpumask, cpu_online_mask); - if (!find_free_core_vector(&searchmask, &found_coreid, &found_vector)) { + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) { raw_spin_unlock_irqrestore(&vector_lock, flags); return -ENOSPC; } @@ -141,12 +139,12 @@ static int sw64_set_affinity(struct irq_data *d, const struct cpumask *cpumask, entry = irq_get_msi_desc(irqd->irq); hose = (struct pci_controller *)msi_desc_to_pci_sysdata(entry); spin_lock(&cdata->cdata_lock); - per_cpu(vector_irq, found_coreid)[found_vector] = irqd->irq; - msi_config = set_piu_msi_config(hose, found_coreid, cdata->msi_config_index, found_vector); + per_cpu(vector_irq, cpu)[vector] = irqd->irq; + msi_config = set_piu_msi_config(hose, cpu, cdata->msi_config_index, vector); cdata->prev_vector = cdata->vector; - cdata->prev_coreid = cdata->dst_coreid; - cdata->dst_coreid = found_coreid; - cdata->vector = found_vector; + cdata->prev_cpu = cdata->dst_cpu; + cdata->dst_cpu = cpu; + cdata->vector = vector; cdata->msi_config = msi_config; cdata->move_in_progress = true; spin_unlock(&cdata->cdata_lock); @@ -178,8 +176,8 @@ static int __assign_irq_vector(int virq, unsigned int nr_irqs, const struct cpumask *mask; struct cpumask searchmask; struct sw64_msi_chip_data *cdata; - int msiconf_index, coreid, node; - int i, found_vector, found_coreid; + int msiconf_index, node; + int i, vector, cpu; unsigned long msi_config; int start_index; @@ -207,15 +205,14 @@ static int __assign_irq_vector(int virq, unsigned int nr_irqs, cpumask_copy(&searchmask, cpumask_of_node(node)); } - coreid = cpumask_first(&searchmask); - if (coreid >= nr_cpu_ids) + if (cpumask_first(&searchmask) >= nr_cpu_ids) cpumask_copy(&searchmask, cpu_online_mask); for (i = 0; i < nr_irqs; i++) { - if (!find_free_core_vector(&searchmask, &found_coreid, &found_vector)) + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) return -ENOSPC; - per_cpu(vector_irq, found_coreid)[found_vector] = virq + i; + per_cpu(vector_irq, cpu)[vector] = virq + i; if (i) { irq_data = irq_domain_get_irq_data(domain, virq + i); @@ -230,16 +227,16 @@ static int __assign_irq_vector(int virq, unsigned int nr_irqs, irq_data->chip_data = cdata; msiconf_index = start_index + i; - msi_config = set_piu_msi_config(hose, found_coreid, msiconf_index, found_vector); + msi_config = set_piu_msi_config(hose, cpu, msiconf_index, vector); - cdata->dst_coreid = found_coreid; - cdata->vector = found_vector; + cdata->dst_cpu = cpu; + cdata->vector = vector; cdata->rc_index = hose->index; cdata->rc_node = hose->node; cdata->msi_config = msi_config; cdata->msi_config_index = msiconf_index; - cdata->prev_coreid = found_coreid; - cdata->prev_vector = found_vector; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; cdata->move_in_progress = false; } return 0; @@ -279,7 +276,7 @@ static void sw64_vector_free_irqs(struct irq_domain *domain, clear_bit(cdata->msi_config_index, 
hose->piu_msiconfig); } irq_domain_reset_irq_data(irq_data); - per_cpu(vector_irq, cdata->dst_coreid)[cdata->vector] = 0; + per_cpu(vector_irq, cdata->dst_cpu)[cdata->vector] = 0; kfree(cdata); raw_spin_unlock_irqrestore(&vector_lock, flags); } @@ -288,8 +285,10 @@ static void sw64_vector_free_irqs(struct irq_domain *domain, static void sw64_irq_free_descs(unsigned int virq, unsigned int nr_irqs) { - if (is_guest_or_emul()) + if (is_guest_or_emul()) { + vt_sw64_vector_free_irqs(virq, nr_irqs); return irq_free_descs(virq, nr_irqs); + } return irq_domain_free_irqs(virq, nr_irqs); } @@ -399,22 +398,22 @@ void arch_init_msi_domain(struct irq_domain *parent) pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); } -static void irq_move_complete(struct sw64_msi_chip_data *cdata, int coreid, int vector) +static void irq_move_complete(struct sw64_msi_chip_data *cdata, int cpu, int vector) { if (likely(!cdata->move_in_progress)) return; - if (vector == cdata->vector && cdata->dst_coreid == coreid) { + if (vector == cdata->vector && cdata->dst_cpu == cpu) { raw_spin_lock(&vector_lock); cdata->move_in_progress = 0; - per_cpu(vector_irq, cdata->prev_coreid)[cdata->prev_vector] = 0; + per_cpu(vector_irq, cdata->prev_cpu)[cdata->prev_vector] = 0; raw_spin_unlock(&vector_lock); } } void handle_pci_msi_interrupt(unsigned long type, unsigned long vector, unsigned long pci_msi1_addr) { - int i, msi_index = 0; - int vector_index = 0, logical_cid; + int i, irq, piu_index, msi_index = 0; + int cpu, vector_index = 0; unsigned long value = 0; unsigned long int_pci_msi[3]; unsigned long *ptr; @@ -422,7 +421,9 @@ void handle_pci_msi_interrupt(unsigned long type, unsigned long vector, unsigned struct sw64_msi_chip_data *cdata; if (is_guest_or_emul()) { - handle_irq(vector); + cpu = smp_processor_id(); + irq = per_cpu(vector_irq, cpu)[vector]; + handle_irq(irq); return; } @@ -431,25 +432,22 @@ void handle_pci_msi_interrupt(unsigned long type, unsigned long vector, unsigned int_pci_msi[1] = *(ptr + 1); int_pci_msi[2] = *(ptr + 2); - logical_cid = smp_processor_id(); + cpu = smp_processor_id(); for (i = 0; i < 4; i++) { vector_index = i * 64; while (vector != 0) { - int irq = 0; - int piu_index = 0; - msi_index = find_next_bit(&vector, 64, msi_index); if (msi_index == 64) { msi_index = 0; continue; } - irq = per_cpu(vector_irq, logical_cid)[vector_index + msi_index]; + irq = per_cpu(vector_irq, cpu)[vector_index + msi_index]; irq_data = irq_domain_get_irq_data(msi_default_domain->parent, irq); cdata = irq_data_get_irq_chip_data(irq_data); spin_lock(&cdata->cdata_lock); - irq_move_complete(cdata, logical_cid, vector_index + msi_index); + irq_move_complete(cdata, cpu, vector_index + msi_index); piu_index = cdata->msi_config_index; value = cdata->msi_config | (1UL << 63); write_piu_ior0(cdata->rc_node, cdata->rc_index, MSICONFIG0 + (piu_index << 7), value); diff --git a/arch/sw_64/chip/chip3/vt_msi.c b/arch/sw_64/chip/chip3/vt_msi.c index 428757642342e01491332a682e01668563ed5557..0cdf7f196e8a0f24ea1c4c23718f8cd9f866e086 100644 --- a/arch/sw_64/chip/chip3/vt_msi.c +++ b/arch/sw_64/chip/chip3/vt_msi.c @@ -4,35 +4,54 @@ #include #include -#define QEMU_MSIX_MSG_ADDR (0x8000fee00000UL) - static DEFINE_RAW_SPINLOCK(vector_lock); -static struct irq_chip msi_chip = { +static void __vt_irq_msi_compose_msg(struct sw64_msi_chip_data *cdata, + struct msi_msg *msg) +{ + msg->address_hi = (u32)(VT_MSIX_MSG_ADDR >> 32); + msg->address_lo = (u32)(VT_MSIX_MSG_ADDR & 0xffffffff) + | VT_MSIX_ADDR_DEST_ID(cdata->dst_cpu); + msg->data = 
cdata->vector; +} + +static void vt_irq_msi_compose_msg(struct irq_data *irqd, struct msi_msg *msg) +{ + struct sw64_msi_chip_data *cdata; + + cdata = irqd->chip_data; + __vt_irq_msi_compose_msg(cdata, msg); +} + +static void vt_irq_msi_update_msg(struct irq_data *irqd, + struct sw64_msi_chip_data *cdata) +{ + struct msi_msg msg[2] = { [1] = { }, }; + + __vt_irq_msi_compose_msg(cdata, msg); + pci_write_msi_msg(irqd->irq, msg); +} + +static struct irq_chip vt_pci_msi_controller = { .name = "PCI-MSI", .irq_unmask = pci_msi_unmask_irq, .irq_mask = pci_msi_mask_irq, .irq_ack = sw64_irq_noop, + .irq_compose_msi_msg = vt_irq_msi_compose_msg, }; -static int qemu_msi_compose_msg(unsigned int irq, struct msi_msg *msg) -{ - msg->address_hi = (unsigned int)(QEMU_MSIX_MSG_ADDR >> 32); - msg->address_lo = (unsigned int)(QEMU_MSIX_MSG_ADDR & 0xffffffff); - msg->data = irq; - return irq; -} - int chip_setup_vt_msix_irq(struct pci_dev *dev, struct msi_desc *desc) { - struct msi_msg msg; int virq, val_node = 0; struct irq_data *irq_data; - struct sw6_msi_chip_data *cdata; + struct sw64_msi_chip_data *cdata; struct pci_controller *hose = (struct pci_controller *)dev->sysdata; unsigned long flags, node, rc_index; const struct cpumask *mask; + struct cpumask searchmask; + int cpu, vector; + node = hose->node; rc_index = hose->index; mask = cpumask_of_node(node); @@ -46,20 +65,46 @@ int chip_setup_vt_msix_irq(struct pci_dev *dev, struct msi_desc *desc) virq = irq_alloc_descs_from(NR_IRQS_LEGACY, desc->nvec_used, val_node); if (virq < 0) { - pr_debug("cannot allocate IRQ(base 16, count %d)\n", desc->nvec_used); + pr_err("Failed to allocate IRQ(base 16, count %d)\n", desc->nvec_used); raw_spin_unlock_irqrestore(&vector_lock, flags); return virq; } - qemu_msi_compose_msg(virq, &msg); - irq_set_msi_desc(virq, desc); - pci_write_msi_msg((virq), &msg); - irq_set_chip_and_handler_name(virq, &msi_chip, handle_edge_irq, "edge"); irq_data = irq_get_irq_data(virq); + + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + cdata = kzalloc(sizeof(*cdata), GFP_KERNEL); if (!cdata) return -ENOMEM; + + per_cpu(vector_irq, cpu)[vector] = virq; + + irq_set_msi_desc(virq, desc); + irq_set_chip_and_handler_name(virq, &vt_pci_msi_controller, + handle_edge_irq, "edge"); + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->rc_index = hose->index; + cdata->rc_node = hose->node; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + irq_data->chip_data = cdata; + + vt_irq_msi_update_msg(irq_data, irq_data->chip_data); raw_spin_unlock_irqrestore(&vector_lock, flags); return 0; } @@ -68,15 +113,17 @@ EXPORT_SYMBOL(chip_setup_vt_msix_irq); int chip_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type) { struct msi_desc *desc; - struct msi_msg msg; struct pci_controller *hose = (struct pci_controller *)dev->sysdata; struct irq_data *irq_data; - struct sw6_msi_chip_data *cdata; - int i = 0; + struct sw64_msi_chip_data *cdata; unsigned long node, rc_index; int virq = -1, val_node = 0; unsigned long flags; + const struct cpumask *mask; + struct cpumask searchmask; + int i, vector, cpu; + if (type == PCI_CAP_ID_MSI && nvec > 32) return 1; @@ -91,21 +138,46 @@ int 
chip_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type) val_node = node; virq = irq_alloc_descs_from(NR_IRQS_LEGACY, desc->nvec_used, val_node); if (virq < 0) { - pr_debug("cannot allocate IRQ(base 16, count %d)\n", desc->nvec_used); + pr_err("Failed to allocate IRQ(base 16, count %d)\n", desc->nvec_used); raw_spin_unlock_irqrestore(&vector_lock, flags); return virq; } - qemu_msi_compose_msg(virq, &msg); + + irq_data = irq_get_irq_data(virq); + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + for (i = 0; i < desc->nvec_used; i++) { - irq_set_msi_desc_off(virq, i, desc); - pci_write_msi_msg((virq + i), &msg); - desc->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); - irq_set_chip_and_handler_name(virq + i, &msi_chip, handle_edge_irq, "edge"); - irq_data = irq_get_irq_data(virq + i); + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + cdata = kzalloc(sizeof(*cdata), GFP_KERNEL); if (!cdata) return -ENOMEM; + + per_cpu(vector_irq, cpu)[vector] = virq + i; + irq_set_msi_desc_off(virq, i, desc); + desc->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); + irq_set_chip_and_handler_name(virq + i, &vt_pci_msi_controller, handle_edge_irq, "edge"); + irq_data = irq_get_irq_data(virq + i); + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->rc_index = hose->index; + cdata->rc_node = hose->node; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + irq_data->chip_data = cdata; + + vt_irq_msi_update_msg(irq_data, irq_data->chip_data); } } @@ -114,6 +186,28 @@ int chip_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type) } EXPORT_SYMBOL(chip_setup_vt_msi_irqs); +void vt_sw64_vector_free_irqs(unsigned int virq, unsigned int nr_irqs) +{ + int i; + unsigned long flags; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_get_irq_data(virq + i); + if (irq_data && irq_data->chip_data) { + raw_spin_lock_irqsave(&vector_lock, flags); + cdata = irq_data->chip_data; + irq_data->hwirq = 0; + irq_data->chip = &no_irq_chip; + irq_data->chip_data = NULL; + per_cpu(vector_irq, cdata->dst_cpu)[cdata->vector] = 0; + kfree(cdata); + raw_spin_unlock_irqrestore(&vector_lock, flags); + } + } +} + int __arch_setup_vt_msix_irqs(struct pci_dev *dev, int nvec, int type) { struct msi_desc *entry; diff --git a/arch/sw_64/configs/anolis_defconfig b/arch/sw_64/configs/anolis_defconfig index 7e1c54a80c6ff224e28dfbaa2c5bb6bdb406f22c..4ff69304ac78c8182a96c8d85fbbcf45e50e9053 100644 --- a/arch/sw_64/configs/anolis_defconfig +++ b/arch/sw_64/configs/anolis_defconfig @@ -6,6 +6,7 @@ CONFIG_USELIB=y CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y CONFIG_LOG_BUF_SHIFT=18 CONFIG_MEMCG=y CONFIG_BLK_CGROUP=y @@ -19,23 +20,17 @@ CONFIG_SCHED_AUTOGROUP=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y -# CONFIG_SGETMASK_SYSCALL is not set CONFIG_KALLSYMS_ALL=y CONFIG_BPF_SYSCALL=y -CONFIG_DEBUG_PERF_USE_VMALLOC=y # CONFIG_COMPAT_BRK is not set CONFIG_SMP=y -CONFIG_NUMA=y -CONFIG_NODES_SHIFT=7 -CONFIG_EFI=y -CONFIG_PHYSICAL_START=0x900000 -# CONFIG_RELOCATABLE is not set -# CONFIG_ACPI_SPCR_TABLE is not set -CONFIG_ACPI_TAD=y -CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +# 
CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_DMI_SYSFS=m -# CONFIG_EFI_CUSTOM_SSDT_OVERLAYS is not set -CONFIG_KVM=y +CONFIG_ACPI_TAD=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y CONFIG_KPROBES=y CONFIG_JUMP_LABEL=y CONFIG_MODULES=y @@ -43,7 +38,6 @@ CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y -# CONFIG_UNUSED_SYMBOLS is not set CONFIG_PARTITION_ADVANCED=y CONFIG_OSF_PARTITION=y CONFIG_BSD_DISKLABEL=y @@ -53,10 +47,8 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_LDM_PARTITION=y CONFIG_SGI_PARTITION=y CONFIG_ULTRIX_PARTITION=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_CMA=y -CONFIG_CMA_AREAS=7 CONFIG_NET=y CONFIG_PACKET=y CONFIG_PACKET_DIAG=y @@ -528,7 +520,6 @@ CONFIG_INPUT_EVDEV=y # CONFIG_SERIO_SERPORT is not set CONFIG_SERIO_LIBPS2=y CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_16550A_VARIANTS=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_SERIAL_8250_PCI is not set CONFIG_SERIAL_OF_PLATFORM=y @@ -575,10 +566,6 @@ CONFIG_UIO_PCI_GENERIC=m CONFIG_VIRTIO_PCI=y # CONFIG_VIRTIO_PCI_LEGACY is not set CONFIG_VIRTIO_MMIO=y -CONFIG_VHOST_NET=m -CONFIG_VHOST_SCSI=m -CONFIG_VHOST_VSOCK=m -CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y CONFIG_STAGING=y CONFIG_FB_SM750=y CONFIG_EXT4_FS=y @@ -688,5 +675,6 @@ CONFIG_CRYPTO_LZO=y # CONFIG_CRYPTO_HW is not set CONFIG_CONSOLE_LOGLEVEL_QUIET=7 # CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_SCHEDSTATS=y # CONFIG_RCU_TRACE is not set # CONFIG_FTRACE is not set diff --git a/arch/sw_64/configs/kata_anolis_defconfig b/arch/sw_64/configs/kata_anolis_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..f553f0e71dbf865ff8a49109e0843291b52a98f1 --- /dev/null +++ b/arch/sw_64/configs/kata_anolis_defconfig @@ -0,0 +1,616 @@ +CONFIG_LOCALVERSION="-xuelang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# CONFIG_COMPAT_BRK is not set +CONFIG_CPUFREQ_DEBUGFS=y +# CONFIG_LOCK_MEMB is not set +CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_USE_OF=y +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +# CONFIG_SUSPEND is not set +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m 
+CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m 
+CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_NFCT=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m 
+CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=y +CONFIG_VSOCKETS_DIAG=m +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_JIT=y +CONFIG_NET_DROP_MONITOR=m +# CONFIG_WIRELESS is not set +CONFIG_CAIF=m +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_SUNWAY=y +# 
CONFIG_HW_RANDOM is not set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SSB=y +CONFIG_DRM=y +CONFIG_FIRMWARE_EDID=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_STAGING=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=m +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set diff --git a/arch/sw_64/configs/kata_guest_defconfig b/arch/sw_64/configs/kata_guest_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..8122155c127659ffdf6b0e0b0df3a2ce5f677cdf --- /dev/null +++ b/arch/sw_64/configs/kata_guest_defconfig @@ -0,0 +1,633 @@ 
+CONFIG_LOCALVERSION="-xuelang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# CONFIG_COMPAT_BRK is not set +CONFIG_CPUFREQ_DEBUGFS=y +# CONFIG_LOCK_MEMB is not set +CONFIG_SMP=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_USE_OF=y +CONFIG_SW64_BUILTIN_DTB=y +CONFIG_SW64_BUILTIN_DTB_NAME="chip_vt" +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +CONFIG_GOOGLE_FIRMWARE=y +CONFIG_SW64_SUSPEND_DEEPSLEEP_NONBOOT_CORE=y +CONFIG_SW64_SUSPEND_DEEPSLEEP_BOOTCORE=y +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m 
+CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m 
+CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=y +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=y +CONFIG_VSOCKETS_DIAG=m +CONFIG_VIRTIO_VSOCKETS=y +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_JIT=y +# CONFIG_WIRELESS is not set +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_OF_OVERLAY=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_VIRTIO_BLK=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y 
+CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_SUNWAY=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_VIRTIO=y +# CONFIG_DEVPORT is not set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SENSORS_PVT=y +CONFIG_SENSORS_LM75=y +CONFIG_SSB=y +CONFIG_SUNWAY_SUPERIO_AST2400=y +CONFIG_DRM=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_FIRMWARE_EDID=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_SW64_LPC_INTC=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=y +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_SQUASHFS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y 
+CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_9P_FS=y +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_ENABLE_MUST_CHECK is not set +# CONFIG_FRAME_POINTER is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set diff --git a/arch/sw_64/include/asm/chip3_io.h b/arch/sw_64/include/asm/chip3_io.h index 18e79cf2a36ba9aac5bd7dff9f93e3af59025c1f..7f64e8816ed315fd31965ce334f387be237b08af 100644 --- a/arch/sw_64/include/asm/chip3_io.h +++ b/arch/sw_64/include/asm/chip3_io.h @@ -28,6 +28,16 @@ #define IO_NODE_SHIFT 44 #define IO_MARK_BIT 47 +extern int topo_nr_threads; +extern int topo_nr_cores; +extern int topo_nr_maxcpus; +#define VT_MAX_CPUS_SHIFT 0 +#define VT_MAX_CPUS_MASK 0x3ff +#define VT_CORES_SHIFT 10 +#define VT_CORES_MASK 0x3ff +#define VT_THREADS_SHIFT 20 +#define VT_THREADS_MASK 0xfff + /* MSIConfig */ #define MSICONFIG_VALID (0x1UL << 63) #define MSICONFIG_EN (0x1UL << 62) @@ -148,6 +158,7 @@ enum { /* MCU CSR */ enum { + SMP_INFO = MCU_BASE | 0x80UL, INIT_CTL = MCU_BASE | 0x680UL, MT_STATE = MCU_BASE | 0x700UL, CORE_ONLINE = MCU_BASE | 0x780UL, diff --git a/arch/sw_64/include/asm/clock.h b/arch/sw_64/include/asm/clock.h index af6872ed9edbea4f6ab3f6b176bd0bc8398f9158..8a6548aa0a0dd7b9cce5f5667e046ff2887025a0 100644 --- a/arch/sw_64/include/asm/clock.h +++ b/arch/sw_64/include/asm/clock.h @@ -11,8 +11,6 @@ struct clk; -extern struct cpufreq_frequency_table sw64_clockmod_table[]; - extern char curruent_policy[CPUFREQ_NAME_LEN]; struct clk_ops { @@ -44,7 +42,7 @@ struct clk { int clk_init(void); -void sw64_set_rate(unsigned long rate); +void sw64_set_rate(unsigned int index); struct clk *sw64_clk_get(struct device *dev, const char *id); diff --git a/arch/sw_64/include/asm/current.h b/arch/sw_64/include/asm/current.h index 
219b5ce9f4fc62a9b38696e1ce97d45a6e03c743..862caabb9c7092f561dca2613ab977fe5cc34cf0 100644 --- a/arch/sw_64/include/asm/current.h +++ b/arch/sw_64/include/asm/current.h @@ -2,9 +2,18 @@ #ifndef _ASM_SW64_CURRENT_H #define _ASM_SW64_CURRENT_H -#include +#ifndef __ASSEMBLY__ -#define get_current() (current_thread_info()->task) -#define current get_current() +struct task_struct; +static __always_inline struct task_struct *get_current(void) +{ + register struct task_struct *tp __asm__("$8"); + + return tp; +} + +#define current get_current() + +#endif /* __ASSEMBLY__ */ #endif /* _ASM_SW64_CURRENT_H */ diff --git a/arch/sw_64/include/asm/debug.h b/arch/sw_64/include/asm/debug.h index f0507acc31a7c78edeac4f93fde2dc4f74b1eac0..8db5a8bb9ab72dc1165157a02a111acd07b5c20b 100644 --- a/arch/sw_64/include/asm/debug.h +++ b/arch/sw_64/include/asm/debug.h @@ -24,4 +24,15 @@ */ extern struct dentry *sw64_debugfs_dir; +#define UNA_MAX_ENTRIES 64 + +struct unaligned_stat { + unsigned long pc; + unsigned long va; +}; + +extern char unaligned_task[]; +extern unsigned long unaligned_count; +extern struct unaligned_stat unaligned[]; + #endif /* _ASM_SW64_DEBUG_H */ diff --git a/arch/sw_64/include/asm/efi.h b/arch/sw_64/include/asm/efi.h index ae68505e5e117306fb216ebec27588efd01a5466..b9af3592cb28bbde48273649666cdf32d2be20d0 100644 --- a/arch/sw_64/include/asm/efi.h +++ b/arch/sw_64/include/asm/efi.h @@ -6,6 +6,10 @@ #include #ifdef CONFIG_EFI extern void efi_init(void); +extern unsigned long entSuspend; + +#define SLEEP_ENTRY_GUID EFI_GUID(0x59cb76bb, 0x9c3a, 0x4c8f, 0xbd, 0x5c, 0xc0, 0x0f, 0x20, 0x61, 0x18, 0x4b) + #else #define efi_init() #define efi_idmap_init() @@ -35,8 +39,4 @@ extern void efi_init(void); #define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__) -extern unsigned long entSuspend; - -#define SLEEP_ENTRY_GUID EFI_GUID(0x59cb76bb, 0x9c3a, 0x4c8f, 0xbd, 0x5c, 0xc0, 0x0f, 0x20, 0x61, 0x18, 0x4b) - #endif /* _ASM_SW64_EFI_H */ diff --git a/arch/sw_64/include/asm/hmcall.h b/arch/sw_64/include/asm/hmcall.h index 22de7d9f41a3af521800474b999d2f4599e6b978..8bd5f5357bc0c32541c6afad2fc13520d7f9a4a3 100644 --- a/arch/sw_64/include/asm/hmcall.h +++ b/arch/sw_64/include/asm/hmcall.h @@ -13,6 +13,8 @@ #define HMC_sleepen 0x05 #define HMC_rdksp 0x06 #define HMC_wrasid 0x08 +#define HMC_rdktp 0x09 +#define HMC_wrktp 0x0A #define HMC_rdptbr 0x0B #define HMC_wrptbr 0x0C #define HMC_wrksp 0x0E @@ -150,6 +152,11 @@ __CALL_HMC_VOID(wrfen); __CALL_HMC_VOID(sleepen); __CALL_HMC_VOID(mtinten); +__CALL_HMC_VOID(rdktp); +#define restore_ktp() rdktp() +__CALL_HMC_VOID(wrktp); +#define save_ktp() wrktp() + __CALL_HMC_R0(rdps, unsigned long); __CALL_HMC_R0(rdusp, unsigned long); diff --git a/arch/sw_64/include/asm/kvm_asm.h b/arch/sw_64/include/asm/kvm_asm.h index 30d3ccbabff05f48338e3b5035288f9ac3ddfcb6..67b4ff594074b78ecd3b4e49232329ce41ded714 100644 --- a/arch/sw_64/include/asm/kvm_asm.h +++ b/arch/sw_64/include/asm/kvm_asm.h @@ -10,6 +10,7 @@ #define SW64_KVM_EXIT_SHUTDOWN 12 #define SW64_KVM_EXIT_TIMER 13 #define SW64_KVM_EXIT_IPI 14 +#define SW64_KVM_EXIT_STOP 16 #define SW64_KVM_EXIT_RESTART 17 #define SW64_KVM_EXIT_FATAL_ERROR 22 #define SW64_KVM_EXIT_DEBUG 24 @@ -25,6 +26,7 @@ {12, "SHUTDOWN" }, \ {13, "TIMER" }, \ {14, "IPI" }, \ + {16, "STOP" }, \ {17, "RESTART" }, \ {22, "FATAL_ERROR" }, \ {23, "MEMHOTPLUG" }, \ diff --git a/arch/sw_64/include/asm/kvm_host.h b/arch/sw_64/include/asm/kvm_host.h index 02d7131f02865c5f2b832ee5db5be6aec8e13fed..5433a3b21b871b7014099b27d18b574fad18d363 100644 --- 
a/arch/sw_64/include/asm/kvm_host.h +++ b/arch/sw_64/include/asm/kvm_host.h @@ -64,6 +64,7 @@ struct kvm_vcpu_arch { /* Virtual clock device */ struct hrtimer hrt; unsigned long timer_next_event; + unsigned long vtimer_freq; int first_run; int halted; int stopped; @@ -96,12 +97,34 @@ struct kvm_vm_stat { }; struct kvm_vcpu_stat { + u64 pid; + u64 exits; + u64 io_exits; + u64 mmio_exits; + u64 migration_set_dirty; + u64 shutdown_exits; + u64 restart_exits; + u64 stop_exits; + u64 ipi_exits; + u64 timer_exits; + u64 debug_exits; +#ifdef CONFIG_KVM_MEMHOTPLUG + u64 memhotplug_exits; +#endif + u64 fatal_error_exits; + u64 halt_exits; u64 halt_successful_poll; u64 halt_attempted_poll; + u64 halt_wakeup; u64 halt_poll_success_ns; u64 halt_poll_fail_ns; - u64 halt_wakeup; u64 halt_poll_invalid; + u64 signal_exits; + u64 steal; + u64 st_max; + u64 utime; + u64 stime; + u64 gtime; }; #ifdef CONFIG_KVM_MEMHOTPLUG diff --git a/arch/sw_64/include/asm/msi.h b/arch/sw_64/include/asm/msi.h index ca5850eb5957eb45014c8b3edd4422b5fc2fe4a5..f27a1fcf6a207bd8c83e80ef938155122023abf8 100644 --- a/arch/sw_64/include/asm/msi.h +++ b/arch/sw_64/include/asm/msi.h @@ -17,8 +17,18 @@ #define MSIX_MSG_ADDR (0x91abc0UL) +#define VT_MSIX_MSG_ADDR (0x8000fee00000UL) +#define VT_MSIX_ADDR_DEST_ID_SHIFT 12 +#define VT_MSIX_ADDR_DEST_ID_MASK (0xff << VT_MSIX_ADDR_DEST_ID_SHIFT) +#define VT_MSIX_ADDR_DEST_ID(dest) \ + (((dest) << VT_MSIX_ADDR_DEST_ID_SHIFT) & VT_MSIX_ADDR_DEST_ID_MASK) + + #ifdef CONFIG_PCI_MSI +extern void vt_sw64_vector_free_irqs(unsigned int virq, unsigned int nr_irqs); extern int sw64_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type); +extern bool find_free_cpu_vector(const struct cpumask *search_mask, + int *found_cpu, int *found_vector); extern int msi_compose_msg(unsigned int irq, struct msi_msg *msg); extern void sw64_irq_noop(struct irq_data *d); extern struct irq_chip sw64_irq_chip; @@ -26,9 +36,20 @@ extern struct irq_chip sw64_irq_chip; #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN #define MSI_ADDR_BASE_HI 0 #define MSI_ADDR_BASE_LO 0x91abc0 -struct sw6_msi_chip_data { + +struct sw64_msi_chip_data { + spinlock_t cdata_lock; + unsigned long msi_config; + unsigned long rc_node; + unsigned long rc_index; unsigned int msi_config_index; + unsigned int dst_cpu; + unsigned int vector; + unsigned int prev_cpu; + unsigned int prev_vector; + bool move_in_progress; }; + extern void arch_init_msi_domain(struct irq_domain *domain); enum irq_alloc_type { IRQ_ALLOC_TYPE_MSI, diff --git a/arch/sw_64/include/asm/pci.h b/arch/sw_64/include/asm/pci.h index ab79d503b84daff89f18ca06771bcc8893f96b0c..de175b3c1043f86d4e81be7e78d92554de62b944 100644 --- a/arch/sw_64/include/asm/pci.h +++ b/arch/sw_64/include/asm/pci.h @@ -24,6 +24,15 @@ struct piu_saved { unsigned long epdmabar; unsigned long msiaddr; unsigned long msiconfig[256]; + unsigned long iommuexcpt_ctrl; + unsigned long dtbaseaddr; + unsigned long hpintconfig; + unsigned long pmeintconfig; + unsigned long aererrintconfig; + unsigned long intaconfig; + unsigned long intbconfig; + unsigned long intcconfig; + unsigned long intdconfig; }; /* A controller. Used to manage multiple PCI busses. 
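For illustration only (not part of the patch): the msi.h hunk above introduces a fixed message address for the virtualized MSI-X path plus a destination-ID field in bits 19:12. Assuming <asm/msi.h> and <linux/types.h> are included, the per-destination message address would be composed as in the sketch below; the helper name is made up here.

	/*
	 * Illustrative sketch: build the VT MSI-X message address for a
	 * given destination id using the VT_MSIX_* definitions added in
	 * asm/msi.h. The destination id lands in bits 19:12 of the fixed
	 * base address, which has those bits clear.
	 */
	static inline u64 vt_msix_msg_addr_sketch(unsigned int dest_id)
	{
		return VT_MSIX_MSG_ADDR | VT_MSIX_ADDR_DEST_ID(dest_id);
	}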
*/ diff --git a/arch/sw_64/include/asm/perf_event.h b/arch/sw_64/include/asm/perf_event.h index 4212342334d5d12926166235e69ee02806aea694..382a74e8501124b2f646a956940a6866581b694c 100644 --- a/arch/sw_64/include/asm/perf_event.h +++ b/arch/sw_64/include/asm/perf_event.h @@ -10,6 +10,7 @@ struct pt_regs; extern unsigned long perf_instruction_pointer(struct pt_regs *regs); extern unsigned long perf_misc_flags(struct pt_regs *regs); #define perf_misc_flags(regs) perf_misc_flags(regs) +#define perf_arch_bpf_user_pt_regs(regs) ®s->user_regs #endif #endif /* _ASM_SW64_PERF_EVENT_H */ diff --git a/arch/sw_64/include/asm/pgalloc.h b/arch/sw_64/include/asm/pgalloc.h index 9572b4709ff45303a02527490e1554f04d678118..5d061d22824a894fac13824d14ba900638c1324e 100644 --- a/arch/sw_64/include/asm/pgalloc.h +++ b/arch/sw_64/include/asm/pgalloc.h @@ -15,26 +15,34 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) { - pmd_set(pmd, (pte_t *)__va(page_to_pa(pte))); + unsigned long pfn = page_to_pfn(pte); + + set_pmd(pmd, __pmd((pfn << _PFN_SHIFT) | _PAGE_TABLE)); } #define pmd_pgtable(pmd) pmd_page(pmd) static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) { - pmd_set(pmd, pte); + unsigned long pfn = virt_to_pfn(pte); + + set_pmd(pmd, __pmd((pfn << _PFN_SHIFT) | _PAGE_TABLE)); } static inline void -p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) +pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) { - p4d_set(p4d, pud); + unsigned long pfn = virt_to_pfn(pmd); + + set_pud(pud, __pud((pfn << _PFN_SHIFT) | _PAGE_TABLE)); } static inline void -pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) { - pud_set(pud, pmd); + unsigned long pfn = virt_to_pfn(pud); + + set_p4d(p4d, __p4d((pfn << _PFN_SHIFT) | _PAGE_TABLE)); } extern pgd_t *pgd_alloc(struct mm_struct *mm); diff --git a/arch/sw_64/include/asm/pgtable.h b/arch/sw_64/include/asm/pgtable.h index 79e2ccdc6003ad6b014f31c6f5bc8123d9e4efe6..1a5828c4b6d64bee85d13ec6b2ef48090e6b10ee 100644 --- a/arch/sw_64/include/asm/pgtable.h +++ b/arch/sw_64/include/asm/pgtable.h @@ -27,20 +27,37 @@ struct vm_area_struct; * within a page table are directly modified. Thus, the following * hook is made available. 
*/ -#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) +static inline void set_pte(pte_t *ptep, pte_t pteval) +{ + *ptep = pteval; +} + static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { set_pte(ptep, pteval); } -#define set_pmd(pmdptr, pmdval) ((*(pmdptr)) = (pmdval)) +static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) +{ + *pmdp = pmd; +} + static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmdval) { set_pmd(pmdp, pmdval); } +static inline void set_pud(pud_t *pudp, pud_t pud) +{ + *pudp = pud; +} + +static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) +{ + *p4dp = p4d; +} /* PGDIR_SHIFT determines what a forth-level page table entry can map */ #define PGDIR_SHIFT (PAGE_SHIFT + 3 * (PAGE_SHIFT - 3)) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) @@ -211,21 +228,6 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) return pmd; } -static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) -{ - pmd_val(*pmdp) = _PAGE_TABLE | (virt_to_pfn(ptep) << _PFN_SHIFT); -} - -static inline void pud_set(pud_t *pudp, pmd_t *pmdp) -{ - pud_val(*pudp) = _PAGE_TABLE | (virt_to_pfn(pmdp) << _PFN_SHIFT); -} - -static inline void p4d_set(p4d_t *p4dp, pud_t *pudp) -{ - p4d_val(*p4dp) = _PAGE_TABLE | (virt_to_pfn(pudp) << _PFN_SHIFT); -} - static inline unsigned long pmd_page_vaddr(pmd_t pmd) { return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PFN_SHIFT); @@ -304,7 +306,13 @@ static inline int pmd_bad(pmd_t pmd) static inline int pmd_present(pmd_t pmd) { - return pmd_val(pmd) & (_PAGE_VALID | _PAGE_PROTNONE); + /* + * Checking for _PAGE_PSE is needed too because + * split_huge_page will temporarily clear the valid bit (but + * the _PAGE_PSE flag will remain set at all times while the + * _PAGE_VALID bit is clear). + */ + return pmd_val(pmd) & (_PAGE_VALID | _PAGE_PROTNONE | _PAGE_PSE); } static inline void pmd_clear(pmd_t *pmdp) diff --git a/arch/sw_64/include/asm/platform.h b/arch/sw_64/include/asm/platform.h index c52e7fd7a03993fb758190fbd4d0f8d35cfb637e..03f098bb4cadb2198a9ddd154f9c9abfa68166d7 100644 --- a/arch/sw_64/include/asm/platform.h +++ b/arch/sw_64/include/asm/platform.h @@ -11,10 +11,19 @@ struct sw64_platform_ops { void (*ops_fixup)(void); }; +#ifdef CONFIG_EFI +#define BIOS_VERSION_GUID EFI_GUID(0xc47a23c3, 0xcebb, 0x4cc9, 0xa5, 0xe2, 0xde, 0xd0, 0x8f, 0xe4, 0x20, 0xb5) + +#define BIOS_SUPPORT_RESET_CLALLBACK(bios_version) ((bios_version) != NULL) + +extern unsigned long bios_version; + +#endif extern struct sw64_platform_ops *sw64_platform; extern struct sw64_platform_ops xuelang_ops; +extern struct boot_params *sunway_boot_params; extern void sw64_halt(void); extern void sw64_poweroff(void); diff --git a/arch/sw_64/include/asm/processor.h b/arch/sw_64/include/asm/processor.h index 886f28635dd45343a2f3b19e8b2909996eb90944..4c1065b61af22d514224c7beba7a68394187af2f 100644 --- a/arch/sw_64/include/asm/processor.h +++ b/arch/sw_64/include/asm/processor.h @@ -12,7 +12,7 @@ #include #define task_pt_regs(task) \ - ((struct pt_regs *) (task_stack_page(task) + 2 * PAGE_SIZE) - 1) + ((struct pt_regs *) (task->stack + THREAD_SIZE) - 1) /* * Returns current instruction pointer ("program counter"). 
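With THREAD_INFO_IN_TASK selected, thread_info no longer sits at the base of the kernel stack, so the task_pt_regs() change in processor.h above derives the register frame from task->stack instead. A minimal sketch of what the new macro expands to (same layout assumption: pt_regs is pushed immediately below the top of the THREAD_SIZE stack):

	/*
	 * Sketch of the new task_pt_regs(): the pt_regs frame lives just
	 * below the stack top at task->stack + THREAD_SIZE.
	 */
	static inline struct pt_regs *task_pt_regs_sketch(struct task_struct *task)
	{
		return (struct pt_regs *)((char *)task->stack + THREAD_SIZE) - 1;
	}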
@@ -96,4 +96,8 @@ static inline void spin_lock_prefetch(const void *ptr) } #endif +static inline void wait_for_interrupt(void) +{ + __asm__ __volatile__ ("halt"); +} #endif /* _ASM_SW64_PROCESSOR_H */ diff --git a/arch/sw_64/include/asm/ptrace.h b/arch/sw_64/include/asm/ptrace.h index 4db8b61fc09394a487db69a677bd5a552f7d64fb..2c60bc6730ad406d04a9ba44193380236fd19227 100644 --- a/arch/sw_64/include/asm/ptrace.h +++ b/arch/sw_64/include/asm/ptrace.h @@ -4,7 +4,6 @@ #include #include -#include #include /* @@ -13,40 +12,51 @@ */ struct pt_regs { - unsigned long r0; - unsigned long r1; - unsigned long r2; - unsigned long r3; - unsigned long r4; - unsigned long r5; - unsigned long r6; - unsigned long r7; - unsigned long r8; - unsigned long r9; - unsigned long r10; - unsigned long r11; - unsigned long r12; - unsigned long r13; - unsigned long r14; - unsigned long r15; - /* r16 ~ r18 saved by hmcode */ - unsigned long r19; - unsigned long r20; - unsigned long r21; - unsigned long r22; - unsigned long r23; - unsigned long r24; - unsigned long r25; - unsigned long r26; - unsigned long r27; - unsigned long r28; + union { + struct user_pt_regs user_regs; + struct { + unsigned long r0; + unsigned long r1; + unsigned long r2; + unsigned long r3; + unsigned long r4; + unsigned long r5; + unsigned long r6; + unsigned long r7; + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; + unsigned long r16; + unsigned long r17; + unsigned long r18; + unsigned long r19; + unsigned long r20; + unsigned long r21; + unsigned long r22; + unsigned long r23; + unsigned long r24; + unsigned long r25; + unsigned long r26; + unsigned long r27; + unsigned long r28; + unsigned long gp; + unsigned long sp; + unsigned long pc; + unsigned long ps; + }; + }; /* These are saved by HMcode: */ - unsigned long ps; - unsigned long pc; - unsigned long gp; - unsigned long r16; - unsigned long r17; - unsigned long r18; + unsigned long hm_ps; + unsigned long hm_pc; + unsigned long hm_gp; + unsigned long hm_r16; + unsigned long hm_r17; + unsigned long hm_r18; }; #define arch_has_single_step() (1) @@ -58,14 +68,9 @@ struct pt_regs { #define kernel_stack_pointer(regs) ((unsigned long)((regs) + 1)) #define instruction_pointer_set(regs, val) ((regs)->pc = val) - -#define current_pt_regs() \ - ((struct pt_regs *) ((char *)current_thread_info() + 2 * PAGE_SIZE) - 1) -#define signal_pt_regs current_pt_regs - #define force_successful_syscall_return() (current_pt_regs()->r0 = 0) -#define MAX_REG_OFFSET (offsetof(struct pt_regs, r18)) +#define MAX_REG_OFFSET (offsetof(struct pt_regs, ps)) extern short regoffsets[]; diff --git a/arch/sw_64/include/asm/smp.h b/arch/sw_64/include/asm/smp.h index 0573361dc840d021f3acf88989e4eeae2ce5be97..fed9d682f1d745862bd0ea20a6ef02abd2db2505 100644 --- a/arch/sw_64/include/asm/smp.h +++ b/arch/sw_64/include/asm/smp.h @@ -2,14 +2,16 @@ #ifndef _ASM_SW64_SMP_H #define _ASM_SW64_SMP_H -#include -#include -#include -#include -#include #include +#include +#include +#include #include +#include +#include +#include + /* HACK: Cabrio WHAMI return value is bogus if more than 8 bits used.. 
:-( */ extern cpumask_t core_start; @@ -55,7 +57,13 @@ struct smp_rcb_struct { #define INIT_SMP_RCB ((struct smp_rcb_struct *) __va(0x820000UL)) #define hard_smp_processor_id() __hard_smp_processor_id() -#define raw_smp_processor_id() (current_thread_info()->cpu) + +#ifdef GENERATING_ASM_OFFSETS +#define raw_smp_processor_id() (0) +#else +#include +#define raw_smp_processor_id() (*((unsigned int *)((void *)current + TASK_CPU))) +#endif /* The map from sequential logical cpu number to hard cid. */ extern int __cpu_to_rcid[NR_CPUS]; @@ -70,92 +78,15 @@ extern int __rcid_to_cpu[NR_CPUS]; #define cpu_physical_id(cpu) __cpu_to_rcid[cpu] extern unsigned long tidle_pcb[NR_CPUS]; +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); -struct smp_ops { - void (*smp_prepare_boot_cpu)(void); - void (*smp_prepare_cpus)(unsigned int max_cpus); - void (*smp_cpus_done)(unsigned int max_cpus); - - void (*stop_other_cpus)(int wait); - void (*smp_send_reschedule)(int cpu); - - int (*cpu_up)(unsigned int cpu, struct task_struct *tidle); - int (*cpu_disable)(void); - void (*cpu_die)(unsigned int cpu); - void (*play_dead)(void); - - void (*send_call_func_ipi)(const struct cpumask *mask); - void (*send_call_func_single_ipi)(int cpu); -}; - -extern struct smp_ops smp_ops; - -static inline void smp_send_stop(void) -{ - smp_ops.stop_other_cpus(0); -} - -static inline void stop_other_cpus(void) -{ - smp_ops.stop_other_cpus(1); -} - -static inline void smp_prepare_boot_cpu(void) -{ - smp_ops.smp_prepare_boot_cpu(); -} - -static inline void smp_prepare_cpus(unsigned int max_cpus) -{ - smp_ops.smp_prepare_cpus(max_cpus); -} - -static inline void smp_cpus_done(unsigned int max_cpus) -{ - smp_ops.smp_cpus_done(max_cpus); -} - -static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle) -{ - return smp_ops.cpu_up(cpu, tidle); -} - -static inline int __cpu_disable(void) -{ - return smp_ops.cpu_disable(); -} - -static inline void __cpu_die(unsigned int cpu) -{ - smp_ops.cpu_die(cpu); -} - -static inline void play_dead(void) -{ - smp_ops.play_dead(); -} - -static inline void smp_send_reschedule(int cpu) -{ - smp_ops.smp_send_reschedule(cpu); -} - -static inline void arch_send_call_function_single_ipi(int cpu) -{ - smp_ops.send_call_func_single_ipi(cpu); -} - -static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) -{ - smp_ops.send_call_func_ipi(mask); -} - +#ifdef CONFIG_HOTPLUG_CPU +int __cpu_disable(void); +void __cpu_die(unsigned int cpu); +#endif /* CONFIG_HOTPLUG_CPU */ #else /* CONFIG_SMP */ -static inline void play_dead(void) -{ - BUG(); /*Fixed me*/ -} #define hard_smp_processor_id() 0 #define smp_call_function_on_cpu(func, info, wait, cpu) ({ 0; }) #define cpu_to_rcid(cpu) ((int)whami()) diff --git a/arch/sw_64/include/asm/suspend.h b/arch/sw_64/include/asm/suspend.h index 521ab099f94b80ca6f31abc0f5da5447a6debeb0..7b6d7bfc95952197e91421b8567f0e0b696fd2a0 100644 --- a/arch/sw_64/include/asm/suspend.h +++ b/arch/sw_64/include/asm/suspend.h @@ -38,6 +38,7 @@ struct processor_state { struct callee_saved_regs regs; struct callee_saved_fpregs fpregs; unsigned long fpcr; + unsigned long ktp; #ifdef CONFIG_HIBERNATION unsigned long sp; struct vcpucb vcb; diff --git a/arch/sw_64/include/asm/sw64_init.h b/arch/sw_64/include/asm/sw64_init.h index 893bac1c621b499d0fabeb495097616606d34fae..8b437ef73cde74b94cf52fe2c7f6d9661e7c4070 100644 --- a/arch/sw_64/include/asm/sw64_init.h +++ 
b/arch/sw_64/include/asm/sw64_init.h @@ -10,6 +10,7 @@ struct sw64_early_init_ops { void (*setup_core_start)(struct cpumask *cpumask); unsigned long (*get_node_mem)(int nodeid); + void (*get_smp_info)(void); }; struct sw64_pci_init_ops { diff --git a/arch/sw_64/include/asm/thread_info.h b/arch/sw_64/include/asm/thread_info.h index c9637d32e1bec199f259b858dd7463e3bd83287e..ed0649b786de391bf5f25b42f0f597bcada099c2 100644 --- a/arch/sw_64/include/asm/thread_info.h +++ b/arch/sw_64/include/asm/thread_info.h @@ -5,7 +5,6 @@ #ifdef __KERNEL__ #ifndef __ASSEMBLY__ -#include #include #include @@ -25,12 +24,10 @@ struct pcb_struct { struct thread_info { struct pcb_struct pcb; /* hmcode state */ - struct task_struct *task; /* main task structure */ unsigned int flags; /* low level flags */ unsigned int ieee_state; /* see fpu.h */ mm_segment_t addr_limit; /* thread address space */ - unsigned int cpu; /* current CPU */ int preempt_count; /* 0 => preemptible, <0 => BUG */ unsigned int status; /* thread-synchronous flags */ @@ -58,14 +55,10 @@ static __always_inline u64 rtid(void) */ #define INIT_THREAD_INFO(tsk) \ { \ - .task = &tsk, \ .addr_limit = KERNEL_DS, \ .preempt_count = INIT_PREEMPT_COUNT, \ } -/* How to get the thread information struct from C. */ -register struct thread_info *__current_thread_info __asm__("$8"); -#define current_thread_info() __current_thread_info #endif /* __ASSEMBLY__ */ diff --git a/arch/sw_64/include/uapi/asm/bpf_perf_event.h b/arch/sw_64/include/uapi/asm/bpf_perf_event.h new file mode 100644 index 0000000000000000000000000000000000000000..b551b741653d251d6155a214023b2215ae02e871 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/bpf_perf_event.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ +#define _UAPI__ASM_BPF_PERF_EVENT_H__ + +#include + +typedef struct user_pt_regs bpf_user_pt_regs_t; + +#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */ diff --git a/arch/sw_64/include/uapi/asm/kvm.h b/arch/sw_64/include/uapi/asm/kvm.h index 254d6cbf1eb146c53c766c715f2980fb3017ecf5..0ca8c10b855034cc809a1d0e98547ae131c40595 100644 --- a/arch/sw_64/include/uapi/asm/kvm.h +++ b/arch/sw_64/include/uapi/asm/kvm.h @@ -6,6 +6,9 @@ * KVM SW specific structures and definitions. 
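The new get_smp_info() early-init hook added to sw64_init.h above pairs with the SMP_INFO MCU CSR and the VT_* field definitions added to chip3_io.h earlier in this series. The real implementation is not shown in these hunks, so the following decode is only a plausible sketch of how the topo_* counters might be filled from that register; treat the field usage as an assumption.

	/*
	 * Plausible sketch only -- the actual get_smp_info() body is not
	 * part of these hunks. Decode the SMP_INFO CSR into the topology
	 * counters using the VT_* shifts/masks from asm/chip3_io.h.
	 */
	static void get_smp_info_sketch(void)
	{
		unsigned long val = sw64_io_read(0, SMP_INFO);

		topo_nr_maxcpus = (val >> VT_MAX_CPUS_SHIFT) & VT_MAX_CPUS_MASK;
		topo_nr_cores   = (val >> VT_CORES_SHIFT) & VT_CORES_MASK;
		topo_nr_threads = (val >> VT_THREADS_SHIFT) & VT_THREADS_MASK;
	}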
*/ #define SWVM_IRQS 256 +#define IRQ_PENDING_INTX_SHIFT 16 +#define IRQ_PENDING_MSI_VECTORS_SHIFT 17 + enum SW64_KVM_IRQ { SW64_KVM_IRQ_IPI = 27, SW64_KVM_IRQ_TIMER = 9, diff --git a/arch/sw_64/include/uapi/asm/perf_regs.h b/arch/sw_64/include/uapi/asm/perf_regs.h index febde5fd72fb7f95b442060352e6c4a38a7e8fbc..871ad4663d1dbd29cd23395b977615323c67d81e 100644 --- a/arch/sw_64/include/uapi/asm/perf_regs.h +++ b/arch/sw_64/include/uapi/asm/perf_regs.h @@ -20,6 +20,9 @@ enum perf_event_sw64_regs { PERF_REG_SW64_R13, PERF_REG_SW64_R14, PERF_REG_SW64_R15, + PERF_REG_SW64_R16, + PERF_REG_SW64_R17, + PERF_REG_SW64_R18, PERF_REG_SW64_R19, PERF_REG_SW64_R20, PERF_REG_SW64_R21, @@ -30,16 +33,9 @@ enum perf_event_sw64_regs { PERF_REG_SW64_R26, PERF_REG_SW64_R27, PERF_REG_SW64_R28, - PERF_REG_SW64_HAE, - PERF_REG_SW64_TRAP_A0, - PERF_REG_SW64_TRAP_A1, - PERF_REG_SW64_TRAP_A2, - PERF_REG_SW64_PS, - PERF_REG_SW64_PC, PERF_REG_SW64_GP, - PERF_REG_SW64_R16, - PERF_REG_SW64_R17, - PERF_REG_SW64_R18, + PERF_REG_SW64_SP, + PERF_REG_SW64_PC, PERF_REG_SW64_MAX, }; #endif /* _UAPI_ASM_SW64_PERF_REGS_H */ diff --git a/arch/sw_64/kernel/Makefile b/arch/sw_64/kernel/Makefile index 55113c1da6f9c32477b63334fdef44f498a79d79..667e06039987a7d304d8cafc4faa0c3cde14e528 100644 --- a/arch/sw_64/kernel/Makefile +++ b/arch/sw_64/kernel/Makefile @@ -30,7 +30,7 @@ obj-$(CONFIG_HIBERNATION) += hibernate_asm.o hibernate.o obj-$(CONFIG_AUDIT) += audit.o obj-$(CONFIG_PCI) += pci_common.o obj-$(CONFIG_RELOCATABLE) += relocate.o -obj-$(CONFIG_DEBUG_FS) += segvdbg.o +obj-$(CONFIG_DEBUG_FS) += segvdbg.o unaligned.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o ifeq ($(CONFIG_DEBUG_FS)$(CONFIG_NUMA),yy) diff --git a/arch/sw_64/kernel/asm-offsets.c b/arch/sw_64/kernel/asm-offsets.c index 12b3311c1bcb75f25a825de2bf0d74083b5051c8..1250a310dd7139f3d116a38fa8e287de30baa0a8 100644 --- a/arch/sw_64/kernel/asm-offsets.c +++ b/arch/sw_64/kernel/asm-offsets.c @@ -5,6 +5,7 @@ * and format the required data. 
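The uapi perf_regs.h reordering above makes the enum follow user_pt_regs (r0..r28, gp, sp, pc) and drops the HAE/TRAP_* entries, which shifts the bit positions user space passes in attr.sample_regs_user. As an illustration (not taken from the patch), a sampling mask for PC and SP under the new layout would be built like this:

	/*
	 * Illustration only: request the SW64 PC and SP registers in a
	 * perf_event_attr sample mask using the reordered enum values.
	 */
	__u64 sample_regs_user = (1ULL << PERF_REG_SW64_PC) |
				 (1ULL << PERF_REG_SW64_SP);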
*/ +#define GENERATING_ASM_OFFSETS /* asm/smp.h */ #include #include #include @@ -15,11 +16,11 @@ #include "traps.c" + void foo(void) { - DEFINE(TI_TASK, offsetof(struct thread_info, task)); + DEFINE(ASM_THREAD_SIZE, THREAD_SIZE); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); - DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); BLANK(); DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); @@ -27,11 +28,16 @@ void foo(void) DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent)); DEFINE(TASK_GROUP_LEADER, offsetof(struct task_struct, group_leader)); DEFINE(TASK_TGID, offsetof(struct task_struct, tgid)); + DEFINE(TASK_STACK, offsetof(struct task_struct, stack)); +#ifdef CONFIG_SMP + DEFINE(TASK_CPU, offsetof(struct task_struct, cpu)); +#endif BLANK(); OFFSET(PSTATE_REGS, processor_state, regs); OFFSET(PSTATE_FPREGS, processor_state, fpregs); OFFSET(PSTATE_FPCR, processor_state, fpcr); + OFFSET(PSTATE_KTP, processor_state, ktp); #ifdef CONFIG_HIBERNATION OFFSET(PSTATE_SP, processor_state, sp); #endif @@ -78,6 +84,9 @@ void foo(void) DEFINE(PT_REGS_R13, offsetof(struct pt_regs, r13)); DEFINE(PT_REGS_R14, offsetof(struct pt_regs, r14)); DEFINE(PT_REGS_R15, offsetof(struct pt_regs, r15)); + DEFINE(PT_REGS_R16, offsetof(struct pt_regs, r16)); + DEFINE(PT_REGS_R17, offsetof(struct pt_regs, r17)); + DEFINE(PT_REGS_R18, offsetof(struct pt_regs, r18)); DEFINE(PT_REGS_R19, offsetof(struct pt_regs, r19)); DEFINE(PT_REGS_R20, offsetof(struct pt_regs, r20)); DEFINE(PT_REGS_R21, offsetof(struct pt_regs, r21)); @@ -88,12 +97,16 @@ void foo(void) DEFINE(PT_REGS_R26, offsetof(struct pt_regs, r26)); DEFINE(PT_REGS_R27, offsetof(struct pt_regs, r27)); DEFINE(PT_REGS_R28, offsetof(struct pt_regs, r28)); - DEFINE(PT_REGS_PS, offsetof(struct pt_regs, ps)); - DEFINE(PT_REGS_PC, offsetof(struct pt_regs, pc)); DEFINE(PT_REGS_GP, offsetof(struct pt_regs, gp)); - DEFINE(PT_REGS_R16, offsetof(struct pt_regs, r16)); - DEFINE(PT_REGS_R17, offsetof(struct pt_regs, r17)); - DEFINE(PT_REGS_R18, offsetof(struct pt_regs, r18)); + DEFINE(PT_REGS_SP, offsetof(struct pt_regs, sp)); + DEFINE(PT_REGS_PC, offsetof(struct pt_regs, pc)); + DEFINE(PT_REGS_PS, offsetof(struct pt_regs, ps)); + DEFINE(PT_REGS_HM_PS, offsetof(struct pt_regs, hm_ps)); + DEFINE(PT_REGS_HM_PC, offsetof(struct pt_regs, hm_pc)); + DEFINE(PT_REGS_HM_GP, offsetof(struct pt_regs, hm_gp)); + DEFINE(PT_REGS_HM_R16, offsetof(struct pt_regs, hm_r16)); + DEFINE(PT_REGS_HM_R17, offsetof(struct pt_regs, hm_r17)); + DEFINE(PT_REGS_HM_R18, offsetof(struct pt_regs, hm_r18)); BLANK(); DEFINE(KVM_REGS_SIZE, sizeof(struct kvm_regs)); diff --git a/arch/sw_64/kernel/clock.c b/arch/sw_64/kernel/clock.c index fac832803bd45910cac2540e6cc060d2e3c374e6..8f2bec5fd7825dcfd455d766c6472f6bcac43a68 100644 --- a/arch/sw_64/kernel/clock.c +++ b/arch/sw_64/kernel/clock.c @@ -1,9 +1,4 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/sw/kernel/setup.c - * - * Copyright (C) 1995 Linus Torvalds - */ #include #include @@ -11,6 +6,7 @@ #include #include +#include #include #include #include @@ -29,73 +25,7 @@ #define CORE_PLL0_CFG_SHIFT 4 #define CORE_PLL2_CFG_SHIFT 18 -char curruent_policy[CPUFREQ_NAME_LEN]; - -/* Minimum CLK support */ -enum { - DC_0, DC_1, DC_2, DC_3, DC_4, DC_5, DC_6, DC_7, DC_8, - DC_9, DC_10, DC_11, DC_12, DC_13, DC_14, DC_15, DC_16, DC_RESV -}; - -static int cpu_freq[14] = { - 0, 1200, 1800, 1900, - 1950, 2000, 2050, 2100, - 2150, 2200, 2250, 2300, - 2350, 2400 }; - -struct cpufreq_frequency_table sw64_clockmod_table[] = { - {-1, 
DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {0, DC_1, 0}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {0, DC_2, 0}, - {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, - {0, DC_3, 0}, - {0, DC_4, 0}, - {0, DC_5, 0}, - {0, DC_6, 0}, - {0, DC_7, 0}, - {0, DC_8, 0}, - {0, DC_9, 0}, - {0, DC_10, 0}, - {0, DC_11, 0}, - {0, DC_12, 0}, - {0, DC_13, 0}, -{-1, DC_RESV, CPUFREQ_TABLE_END}, -}; -EXPORT_SYMBOL_GPL(sw64_clockmod_table); +char curruent_policy[CPUFREQ_NAME_LEN]; static struct clk cpu_clk = { .name = "cpu_clk", @@ -113,13 +43,13 @@ unsigned int __sw64_cpufreq_get(struct cpufreq_policy *policy) { int i; u64 val; + struct cpufreq_frequency_table *ft = policy->freq_table; - val = sw64_io_read(0, CLK_CTL); - val = val >> CORE_PLL2_CFG_SHIFT; + val = sw64_io_read(0, CLK_CTL) >> CORE_PLL2_CFG_SHIFT; - for (i = 0; i < sizeof(cpu_freq)/sizeof(int); i++) { - if (cpu_freq[val] == cpu_freq[i]) - return cpu_freq[i]; + for (i = 0; ft[i].frequency != CPUFREQ_TABLE_END; i++) { + if (val == ft[i].driver_data) + return ft[i].frequency; } return 0; } @@ -131,61 +61,41 @@ void sw64_store_policy(struct cpufreq_policy *policy) } EXPORT_SYMBOL_GPL(sw64_store_policy); -void sw64_set_rate(unsigned long rate) +void sw64_set_rate(unsigned int index) { unsigned int i, val; - int index = -1; + int cpu_num; - rate /= 1000000; + cpu_num = sw64_chip->get_cpu_num(); - for (i = 0; i < sizeof(cpu_freq)/sizeof(int); i++) { - if (rate == cpu_freq[i]) { - index = i; - update_cpu_freq(cpu_freq[i]); - break; - } - } - - if (index < 0) - return; + for (i = 0; i < cpu_num; i++) { + sw64_io_write(i, CLK_CTL, CORE_CLK2_R | CORE_CLK2_V | CLK_PRT); + val = sw64_io_read(i, CLK_CTL); - sw64_io_write(0, CLK_CTL, CORE_CLK2_R | CORE_CLK2_V | CLK_PRT); - sw64_io_write(1, CLK_CTL, CORE_CLK2_R | CORE_CLK2_V | CLK_PRT); - val = sw64_io_read(0, CLK_CTL); + sw64_io_write(i, CLK_CTL, val | index << CORE_PLL2_CFG_SHIFT); - sw64_io_write(0, CLK_CTL, val | index << CORE_PLL2_CFG_SHIFT); - sw64_io_write(1, CLK_CTL, val | index << CORE_PLL2_CFG_SHIFT); + udelay(1); - udelay(1); + sw64_io_write(i, CLK_CTL, CORE_CLK2_V | CLK_PRT + | index << CORE_PLL2_CFG_SHIFT); + val = sw64_io_read(i, CLK_CTL); - sw64_io_write(0, CLK_CTL, CORE_CLK2_V | CLK_PRT - | index << CORE_PLL2_CFG_SHIFT); 
- sw64_io_write(1, CLK_CTL, CORE_CLK2_V | CLK_PRT - | index << CORE_PLL2_CFG_SHIFT); - val = sw64_io_read(0, CLK_CTL); + /* LV1 select PLL1/PLL2 */ + sw64_io_write(i, CLU_LV1_SEL, CLK_LV1_SEL_MUXA | CLK_LV1_SEL_PRT); - /* LV1 select PLL1/PLL2 */ - sw64_io_write(0, CLU_LV1_SEL, CLK_LV1_SEL_MUXA | CLK_LV1_SEL_PRT); - sw64_io_write(1, CLU_LV1_SEL, CLK_LV1_SEL_MUXA | CLK_LV1_SEL_PRT); + /* Set CLK_CTL PLL0 */ + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V); - /* Set CLK_CTL PLL0 */ - sw64_io_write(0, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V); - sw64_io_write(1, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V); + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); - sw64_io_write(0, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V - | index << CORE_PLL0_CFG_SHIFT); - sw64_io_write(1, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V - | index << CORE_PLL0_CFG_SHIFT); + udelay(1); - udelay(1); + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); - sw64_io_write(0, CLK_CTL, val | CORE_CLK0_V - | index << CORE_PLL0_CFG_SHIFT); - sw64_io_write(1, CLK_CTL, val | CORE_CLK0_V - | index << CORE_PLL0_CFG_SHIFT); - - /* LV1 select PLL0/PLL1 */ - sw64_io_write(0, CLU_LV1_SEL, CLK_LV1_SEL_MUXB | CLK_LV1_SEL_PRT); - sw64_io_write(1, CLU_LV1_SEL, CLK_LV1_SEL_MUXB | CLK_LV1_SEL_PRT); + /* LV1 select PLL0/PLL1 */ + sw64_io_write(i, CLU_LV1_SEL, CLK_LV1_SEL_MUXB | CLK_LV1_SEL_PRT); + } } EXPORT_SYMBOL_GPL(sw64_set_rate); diff --git a/arch/sw_64/kernel/dup_print.c b/arch/sw_64/kernel/dup_print.c index 3a32c444207d89f2c4e472786d29704798217dc6..e28e0053239cad1d59b2f568029a4052c9e6368f 100644 --- a/arch/sw_64/kernel/dup_print.c +++ b/arch/sw_64/kernel/dup_print.c @@ -3,7 +3,7 @@ #include #include -#include +#include #include #ifdef CONFIG_SW64_RRK diff --git a/arch/sw_64/kernel/early_init.c b/arch/sw_64/kernel/early_init.c index 2f38719cc216ce2c84f0e1dfdd17d977b1324f02..bcd458a9bdad4e01cfc745451b275406aaae9e02 100644 --- a/arch/sw_64/kernel/early_init.c +++ b/arch/sw_64/kernel/early_init.c @@ -24,6 +24,7 @@ static void __init sw64_setup_platform_ops(void) asmlinkage __visible void __init sw64_start_kernel(void) { fixup_hmcall(); + save_ktp(); sw64_setup_chip_ops(); sw64_setup_platform_ops(); sw64_platform->ops_fixup(); diff --git a/arch/sw_64/kernel/entry-ftrace.S b/arch/sw_64/kernel/entry-ftrace.S index 53125495f4e54d94870ce6002590bda1c1401afa..73e8e043fc9d14fbbaa50bb164fcc4326329001b 100644 --- a/arch/sw_64/kernel/entry-ftrace.S +++ b/arch/sw_64/kernel/entry-ftrace.S @@ -90,6 +90,8 @@ stl $27, PT_REGS_R27($sp) stl $28, PT_REGS_R28($sp) stl $29, PT_REGS_GP($sp) + ldi $0, PT_REGS_SIZE($sp) + stl $0, PT_REGS_SP($sp) .endm .macro RESTORE_PT_REGS diff --git a/arch/sw_64/kernel/entry.S b/arch/sw_64/kernel/entry.S index b360f0e2ddd6338b5e9392170b789d607cba2322..013656e78d08532eefe5d7fed16cc9dad2c1d29c 100644 --- a/arch/sw_64/kernel/entry.S +++ b/arch/sw_64/kernel/entry.S @@ -21,7 +21,7 @@ */ .macro SAVE_ALL - ldi $sp, -PT_REGS_PS($sp) + ldi $sp, -PT_REGS_HM_PS($sp) stl $0, PT_REGS_R0($sp) stl $1, PT_REGS_R1($sp) stl $2, PT_REGS_R2($sp) @@ -48,9 +48,40 @@ stl $25, PT_REGS_R25($sp) stl $26, PT_REGS_R26($sp) stl $27, PT_REGS_R27($sp) + ldl $1, PT_REGS_HM_R16($sp) + ldl $2, PT_REGS_HM_R17($sp) + ldl $3, PT_REGS_HM_R18($sp) + ldl $4, PT_REGS_HM_GP($sp) + ldl $5, PT_REGS_HM_PC($sp) + ldl $6, PT_REGS_HM_PS($sp) + stl $1, PT_REGS_R16($sp) + stl $2, PT_REGS_R17($sp) + stl $3, PT_REGS_R18($sp) + stl $4, PT_REGS_GP($sp) + stl $5, PT_REGS_PC($sp) + stl $6, PT_REGS_PS($sp) + 
and $6, 0x8, $7 + beq $7, 1f + sys_call HMC_rdusp + br $31, 2f +1: ldi $0, PT_REGS_SIZE($sp) +2: stl $0, PT_REGS_SP($sp) + sys_call HMC_rdktp .endm .macro RESTORE_ALL + ldl $1, PT_REGS_R16($sp) + ldl $2, PT_REGS_R17($sp) + ldl $3, PT_REGS_R18($sp) + ldl $4, PT_REGS_GP($sp) + ldl $5, PT_REGS_PC($sp) + ldl $6, PT_REGS_PS($sp) + stl $1, PT_REGS_HM_R16($sp) + stl $2, PT_REGS_HM_R17($sp) + stl $3, PT_REGS_HM_R18($sp) + stl $4, PT_REGS_HM_GP($sp) + stl $5, PT_REGS_HM_PC($sp) + stl $6, PT_REGS_HM_PS($sp) ldl $0, PT_REGS_R0($sp) ldl $1, PT_REGS_R1($sp) ldl $2, PT_REGS_R2($sp) @@ -77,7 +108,7 @@ ldl $26, PT_REGS_R26($sp) ldl $27, PT_REGS_R27($sp) ldl $28, PT_REGS_R28($sp) - ldi $sp, PT_REGS_PS($sp) + ldi $sp, PT_REGS_HM_PS($sp) .endm /* @@ -89,9 +120,7 @@ .ent entInt entInt: SAVE_ALL - ldi $8, 0x3fff ldi $26, ret_from_sys_call - bic $sp, $8, $8 mov $sp, $19 call $31, do_entInt .end entInt @@ -101,9 +130,7 @@ entInt: .ent entArith entArith: SAVE_ALL - ldi $8, 0x3fff ldi $26, ret_from_sys_call - bic $sp, $8, $8 mov $sp, $18 call $31, do_entArith .end entArith @@ -113,9 +140,7 @@ entArith: .ent entMM entMM: SAVE_ALL - ldi $8, 0x3fff ldi $26, ret_from_sys_call - bic $sp, $8, $8 mov $sp, $19 call $31, do_page_fault .end entMM @@ -125,10 +150,8 @@ entMM: .ent entIF entIF: SAVE_ALL - ldi $8, 0x3fff ldi $26, ret_from_sys_call - bic $sp, $8, $8 - mov $sp, $17 + mov $sp, $18 call $31, do_entIF .end entIF @@ -143,8 +166,6 @@ entIF: .ent entUna entUna: SAVE_ALL - ldi $8, 0x3fff - bic $sp, $8, $8 mov $sp, $19 ldl $0, PT_REGS_PS($sp) and $0, 8, $0 /* user mode ? */ @@ -176,8 +197,7 @@ entUna: entSys: SAVE_ALL - ldi $8, 0x3fff - bic $sp, $8, $8 + ldl $0, PT_REGS_R0($sp) ldi $4, NR_SYSCALLS($31) stl $16, PT_REGS_R16($sp) ldi $5, sys_call_table @@ -210,6 +230,10 @@ ret_from_sys_call: and $0, 8, $0 beq $0, ret_to_kernel ret_to_user: +#ifdef CONFIG_DEBUG_RSEQ + mov $sp, $16 + call $26, rseq_syscall +#endif /* Make sure need_resched and sigpending don't change between sampling and the rti. */ ldi $16, 7 @@ -395,8 +419,8 @@ __switch_to: ldl $13, TASK_THREAD_S4($17) ldl $14, TASK_THREAD_S5($17) ldl $15, TASK_THREAD_S6($17) - ldi $8, 0x3fff - bic $sp, $8, $8 + mov $17, $8 + sys_call HMC_wrktp mov $16, $0 ret .end __switch_to diff --git a/arch/sw_64/kernel/ftrace.c b/arch/sw_64/kernel/ftrace.c index 3d99f723dced5c5455039330f64cb7c4ebdf627f..fb25ffe3dbdaf4f26bf4389e63d37fd1aaaa754b 100644 --- a/arch/sw_64/kernel/ftrace.c +++ b/arch/sw_64/kernel/ftrace.c @@ -100,10 +100,12 @@ void arch_ftrace_update_code(int command) int __init ftrace_dyn_arch_init(void) { - init_thread_info.dyn_ftrace_addr = FTRACE_ADDR; + struct thread_info *ti = task_thread_info(&init_task); + + ti->dyn_ftrace_addr = FTRACE_ADDR; #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - init_thread_info.dyn_ftrace_regs_addr = FTRACE_REGS_ADDR; + ti->dyn_ftrace_regs_addr = FTRACE_REGS_ADDR; #endif return 0; } diff --git a/arch/sw_64/kernel/head.S b/arch/sw_64/kernel/head.S index 7cce2a8859e566f74142a15ec54024faa484c967..c855d31de7154097d6aa059f2a9e0bb688adf6bc 100644 --- a/arch/sw_64/kernel/head.S +++ b/arch/sw_64/kernel/head.S @@ -22,9 +22,11 @@ __start: br $27, 1f 1: ldgp $29, 0($27) /* We need to get current_task_info loaded up... */ - ldi $8, init_thread_union + ldi $8, init_task + ldl $30, TASK_STACK($8) /* ... and find our stack ... */ - ldi $30, ASM_THREAD_SIZE($8) + ldi $30, ASM_THREAD_SIZE($30) + /* ... and then we can clear bss data. 
*/ ldi $16, __bss_start ldi $18, __bss_stop @@ -38,8 +40,10 @@ __start: call $26, relocate_kernel ldl $29, 0($30) addl $29, $0, $29 + addl $8, $0, $8 + ldi $30, 8($30) /* Repoint the sp into the new kernel image */ - ldi $30, ASM_THREAD_SIZE($8) + addl $30, $0, $30 #endif /* ... and then we can start the kernel. */ call $26, sw64_start_kernel @@ -68,11 +72,13 @@ __smp_callin: s4addl $0, $1, $1 ldw $0, 0($1) # Get logical cpu number - ldi $2, tidle_ksp + ldi $2, idle_task_pointer s8addl $0, $2, $2 - ldl $30, 0($2) # Get ksp of idle thread + ldl $8, 0($2) # Get ksp of idle thread + sys_call HMC_wrktp - ldi $8, -ASM_THREAD_SIZE($30) # Find "current" + ldl $30, TASK_STACK($8) + ldi $30, ASM_THREAD_SIZE($30) call $26, smp_callin sys_call HMC_halt diff --git a/arch/sw_64/kernel/hibernate_asm.S b/arch/sw_64/kernel/hibernate_asm.S index 23bab0d6edd87567c02ef218d7d9305e7c4efdc6..1e9abcf77beebf3327ff57252d54ba14de3bee9b 100644 --- a/arch/sw_64/kernel/hibernate_asm.S +++ b/arch/sw_64/kernel/hibernate_asm.S @@ -30,6 +30,7 @@ ENTRY(swsusp_arch_suspend) rfpcr $f0 fstd $f0, PSTATE_FPCR($16) + stl $8, PSTATE_KTP($16) stl sp, PSTATE_SP($16) call swsusp_save ldi $16, hibernate_state @@ -112,9 +113,8 @@ $hibernate_setfpec_over: vldd $f9, CALLEE_F9($1) ldl sp, PSTATE_SP($16) + ldl $8, PSTATE_KTP($16) - ldi $8, 0x3fff - bic sp, $8, $8 ldi $0, 0($31) diff --git a/arch/sw_64/kernel/hmcall.c b/arch/sw_64/kernel/hmcall.c index 3d60569a4f6f0845a83ecba9bce30db0c9761f2c..e2be9f618e57690e095f47fcf86059c158b5e067 100644 --- a/arch/sw_64/kernel/hmcall.c +++ b/arch/sw_64/kernel/hmcall.c @@ -76,6 +76,22 @@ static inline void fixup_wrasid(void) entry[9] = 0x1ef00000; /* pri_ret/b p23 */ } +static inline void fixup_rdktp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(rdktp)); + + entry[0] = 0x95161000; /* pri_ldl/p $8, VC__KTP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_wrktp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrktp)); + + entry[0] = 0xb5161000; /* pri_stl/p $8, VC__KTP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + void __init fixup_hmcall(void) { #if defined(CONFIG_SUBARCH_C3B) @@ -83,6 +99,8 @@ void __init fixup_hmcall(void) fixup_wrtp(); fixup_tbiasid(); fixup_wrasid(); + fixup_rdktp(); + fixup_wrktp(); #endif } diff --git a/arch/sw_64/kernel/idle.c b/arch/sw_64/kernel/idle.c index 729af77a228460a9095a0590c6bc7b07bf15981e..8193a7093b570cb79838928d4320d86133608971 100644 --- a/arch/sw_64/kernel/idle.c +++ b/arch/sw_64/kernel/idle.c @@ -7,27 +7,29 @@ #include #include #include - -#ifdef CONFIG_HOTPLUG_CPU -void arch_cpu_idle_dead(void) -{ - play_dead(); -} -#endif +#include void cpu_idle(void) { - int i; - local_irq_enable(); cpu_relax(); - if (is_in_guest()) - hcall(HCALL_HALT, 0, 0, 0); - else { - for (i = 0; i < 16; i++) - asm("nop"); - asm("halt"); + if (is_in_guest()) { + if (!need_resched()) + hcall(HCALL_HALT, 0, 0, 0); + } else { + asm( + ".globl __idle_start\n" + "__idle_start = .\n" + "ldw $1, %0($8)\n" + "srl $1, %1, $1\n" + "blbs $1, $need_resched\n" + "halt\n" + ".globl __idle_end\n" + "__idle_end = .\n" + "$need_resched:" + :: "i"(TI_FLAGS), "i"(TIF_NEED_RESCHED) + : "$1"); } } diff --git a/arch/sw_64/kernel/perf_event.c b/arch/sw_64/kernel/perf_event.c index f1f74a968cbc7f3dcf22e12cd334c00ccc2e9ceb..a30817c47e66ea33a974d5b319b2086ab2b4faca 100644 --- a/arch/sw_64/kernel/perf_event.c +++ b/arch/sw_64/kernel/perf_event.c @@ -10,24 +10,17 @@ /* For tracking PMCs and the hw events they monitor on each CPU. 
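The cpu_idle() rework in idle.c above re-checks TIF_NEED_RESCHED immediately before halting, reading the flags through the task pointer kept in $8, and brackets the check-and-halt sequence with __idle_start/__idle_end so other code can recognize a CPU parked in that window (the consumer of those labels is outside these hunks). Roughly, the inline asm implements the check sketched below; this is a C approximation, not a drop-in replacement.

	/*
	 * Rough C equivalent of the new halt path (sketch only): halt the
	 * core unless a reschedule is already pending. wait_for_interrupt()
	 * is the "halt" helper added to asm/processor.h above.
	 */
	static void idle_halt_sketch(void)
	{
		if (!test_thread_flag(TIF_NEED_RESCHED))
			wait_for_interrupt();
	}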
*/ struct cpu_hw_events { - /* Number of events currently scheduled onto this cpu. - * This tells how many entries in the arrays below - * are valid. + /* + * Set the bit (indexed by the counter number) when the counter + * is used for an event. */ - int n_events; - /* Track counter usage of each counter */ -#define PMC_IN_USE 1 -#define PMC_NOT_USE 0 - int pmcs[MAX_HWEVENTS]; + unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; /* Array of events current scheduled on this cpu. */ struct perf_event *event[MAX_HWEVENTS]; }; DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); -static void sw64_pmu_start(struct perf_event *event, int flags); -static void sw64_pmu_stop(struct perf_event *event, int flags); - struct sw64_perf_event { /* pmu index */ int counter; @@ -375,6 +368,55 @@ static unsigned long sw64_perf_event_update(struct perf_event *event, * */ +/* + * pmu->start: start the event. + */ +static void sw64_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + if (flags & PERF_EF_RELOAD) { + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + sw64_perf_event_set_period(event, hwc, hwc->idx); + } + + hwc->state = 0; + + /* counting in selected modes, for both counters */ + wrperfmon(PERFMON_CMD_PM, hwc->config_base); + if (hwc->idx == PERFMON_PC0) { + wrperfmon(PERFMON_CMD_EVENT_PC0, hwc->event_base); + wrperfmon(PERFMON_CMD_ENABLE, PERFMON_ENABLE_ARGS_PC0); + } else { + wrperfmon(PERFMON_CMD_EVENT_PC1, hwc->event_base); + wrperfmon(PERFMON_CMD_ENABLE, PERFMON_ENABLE_ARGS_PC1); + } +} + +/* + * pmu->stop: stop the counter + */ +static void sw64_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!(hwc->state & PERF_HES_STOPPED)) { + wrperfmon(PERFMON_CMD_DISABLE, hwc->idx == 0 ? + PERFMON_DISABLE_ARGS_PC0 : + PERFMON_DISABLE_ARGS_PC1); + hwc->state |= PERF_HES_STOPPED; + barrier(); + } + + if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + sw64_perf_event_update(event, hwc, hwc->idx, 0); + hwc->state |= PERF_HES_UPTODATE; + } +} + /* * pmu->add: add the event to PMU. */ @@ -387,17 +429,13 @@ static int sw64_pmu_add(struct perf_event *event, int flags) local_irq_save(irq_flags); - if (cpuc->pmcs[hwc->idx] == PMC_IN_USE) { + if (__test_and_set_bit(hwc->idx, cpuc->used_mask)) { err = -ENOSPC; goto out; } - cpuc->pmcs[hwc->idx] = PMC_IN_USE; cpuc->event[hwc->idx] = event; - - cpuc->n_events++; - hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; if (flags & PERF_EF_START) sw64_pmu_start(event, PERF_EF_RELOAD); @@ -424,8 +462,7 @@ static void sw64_pmu_del(struct perf_event *event, int flags) sw64_pmu_stop(event, PERF_EF_UPDATE); cpuc->event[hwc->idx] = NULL; - cpuc->pmcs[hwc->idx] = PMC_NOT_USE; - cpuc->n_events--; + __clear_bit(event->hw.idx, cpuc->used_mask); /* Absorb the final count and turn off the event. */ perf_event_update_userpage(event); @@ -433,55 +470,6 @@ static void sw64_pmu_del(struct perf_event *event, int flags) local_irq_restore(irq_flags); } -/* - * pmu->start: start the event. 
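The cpu_hw_events rework above replaces the pmcs[] array and n_events counter with a single used_mask bitmap: sw64_pmu_add() claims a counter with __test_and_set_bit() and sw64_pmu_del() releases it with __clear_bit(). A minimal sketch of that claim/release pattern follows (function names invented here; the non-atomic bitops suffice because both paths run with local interrupts disabled):

	/*
	 * Sketch of the counter bookkeeping used by sw64_pmu_add()/del():
	 * claiming returns -ENOSPC when the counter is already taken.
	 */
	static int claim_counter_sketch(struct cpu_hw_events *cpuc, int idx)
	{
		if (__test_and_set_bit(idx, cpuc->used_mask))
			return -ENOSPC;
		return 0;
	}

	static void release_counter_sketch(struct cpu_hw_events *cpuc, int idx)
	{
		__clear_bit(idx, cpuc->used_mask);
	}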
- */ -static void sw64_pmu_start(struct perf_event *event, int flags) -{ - struct hw_perf_event *hwc = &event->hw; - - if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) - return; - - if (flags & PERF_EF_RELOAD) { - WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); - sw64_perf_event_set_period(event, hwc, hwc->idx); - } - - hwc->state = 0; - - /* counting in selected modes, for both counters */ - wrperfmon(PERFMON_CMD_PM, hwc->config_base); - if (hwc->idx == PERFMON_PC0) { - wrperfmon(PERFMON_CMD_EVENT_PC0, hwc->event_base); - wrperfmon(PERFMON_CMD_ENABLE, PERFMON_ENABLE_ARGS_PC0); - } else { - wrperfmon(PERFMON_CMD_EVENT_PC1, hwc->event_base); - wrperfmon(PERFMON_CMD_ENABLE, PERFMON_ENABLE_ARGS_PC1); - } -} - -/* - * pmu->stop: stop the counter - */ -static void sw64_pmu_stop(struct perf_event *event, int flags) -{ - struct hw_perf_event *hwc = &event->hw; - - if (!(hwc->state & PERF_HES_STOPPED)) { - wrperfmon(PERFMON_CMD_DISABLE, hwc->idx == 0 ? - PERFMON_DISABLE_ARGS_PC0 : - PERFMON_DISABLE_ARGS_PC1); - hwc->state |= PERF_HES_STOPPED; - barrier(); - } - - if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { - sw64_perf_event_update(event, hwc, hwc->idx, 0); - hwc->state |= PERF_HES_UPTODATE; - } -} - /* * pmu->read: read and update the counter */ diff --git a/arch/sw_64/kernel/perf_regs.c b/arch/sw_64/kernel/perf_regs.c index b036f213936bc6d79214c9b7bdf1ab9a82a40b69..8fc3597d9e41209dc4ba01da3babeebaf00f7e90 100644 --- a/arch/sw_64/kernel/perf_regs.c +++ b/arch/sw_64/kernel/perf_regs.c @@ -8,7 +8,24 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) if (WARN_ON_ONCE((u32)idx >= PERF_REG_SW64_MAX)) return 0; - return ((unsigned long *)regs)[idx]; + switch (idx) { + case PERF_REG_SW64_R16: + return regs->r16; + case PERF_REG_SW64_R17: + return regs->r17; + case PERF_REG_SW64_R18: + return regs->r18; + case PERF_REG_SW64_R19 ... PERF_REG_SW64_R28: + return ((unsigned long *)regs)[idx - 3]; + case PERF_REG_SW64_GP: + return regs->gp; + case PERF_REG_SW64_SP: + return (user_mode(regs) ? 
rdusp() : (u64)(regs + 1)); + case PERF_REG_SW64_PC: + return regs->pc; + default: + return ((unsigned long *)regs)[idx]; + } } #define REG_RESERVED (~((1ULL << PERF_REG_SW64_MAX) - 1)) diff --git a/arch/sw_64/kernel/relocate.c b/arch/sw_64/kernel/relocate.c index 792ee1a9c2b1a261706e1e339c7c6ff85e5de20d..ebdf7d894805e8f2c0a1853d853d5d2ef8bf6c09 100644 --- a/arch/sw_64/kernel/relocate.c +++ b/arch/sw_64/kernel/relocate.c @@ -239,9 +239,6 @@ unsigned int __init relocate_kernel(void) if (plat_post_relocation(offset)) goto out; - /* The current thread is now within the relocated image */ - __current_thread_info = RELOCATED(&init_thread_union); - /* Return the new kernel's offset */ return offset; } diff --git a/arch/sw_64/kernel/reset.c b/arch/sw_64/kernel/reset.c index 7f91a97d9d1269e3a4236456bcd9cebc2482d3e6..22507e539c862140731f3f61153b36f562d7f662 100644 --- a/arch/sw_64/kernel/reset.c +++ b/arch/sw_64/kernel/reset.c @@ -77,6 +77,7 @@ static void default_restart(void) void (*pm_restart)(void); void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); void (*pm_halt)(void); diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c index 5ec55554caf63908fdf4df3000f764fb475f8864..16c13646f3c7312729a610965785c7921fc6520b 100644 --- a/arch/sw_64/kernel/setup.c +++ b/arch/sw_64/kernel/setup.c @@ -178,6 +178,15 @@ static void __init reserve_crashkernel(void) if (ret || !crash_size) return; + if (!crash_size) { + pr_warn("size of crash kernel memory unspecified, no memory reserved for crash kernel\n"); + return; + } + if (!crash_base) { + pr_warn("base of crash kernel memory unspecified, no memory reserved for crash kernel\n"); + return; + } + if (!memblock_is_region_memory(crash_base, crash_size)) memblock_add(crash_base, crash_size); @@ -752,6 +761,8 @@ setup_arch(char **cmdline_p) setup_socket_info(); show_socket_mem_layout(); sw64_chip_init->early_init.setup_core_start(&core_start); + if (is_guest_or_emul()) + sw64_chip_init->early_init.get_smp_info(); setup_sched_clock(); #ifdef CONFIG_GENERIC_SCHED_CLOCK @@ -817,6 +828,8 @@ setup_arch(char **cmdline_p) sw64_memblock_init(); + reserve_crashkernel(); + /* Reserve large chunks of memory for use by CMA for KVM. */ #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) sw64_kvm_reserve(); @@ -845,7 +858,6 @@ setup_arch(char **cmdline_p) */ sw64_init_arch(); - reserve_crashkernel(); /* Reserve standard resources. 
*/ reserve_std_resources(); diff --git a/arch/sw_64/kernel/signal.c b/arch/sw_64/kernel/signal.c index 3a5757b234c6d08bcaae7fb41cc6b114014a608f..6414654a0f595833e2ad16c75f4ea9e7e705978a 100644 --- a/arch/sw_64/kernel/signal.c +++ b/arch/sw_64/kernel/signal.c @@ -325,6 +325,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) sigset_t *oldset = sigmask_to_save(); int ret; + rseq_signal_deliver(ksig, regs); + ret = setup_rt_frame(ksig, oldset, regs); signal_setup_done(ret, ksig, 0); @@ -427,6 +429,7 @@ do_work_pending(struct pt_regs *regs, unsigned long thread_flags, } else { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); + rseq_handle_notify_resume(NULL, regs); } } local_irq_disable(); diff --git a/arch/sw_64/kernel/smp.c b/arch/sw_64/kernel/smp.c index 1bf289b6a89e7b0986b483756ff751af4b58ab87..003fee7f0ea19859fc430004a2b488c81491c34e 100644 --- a/arch/sw_64/kernel/smp.c +++ b/arch/sw_64/kernel/smp.c @@ -34,7 +34,7 @@ EXPORT_SYMBOL(__cpu_to_rcid); int __rcid_to_cpu[NR_CPUS]; /* Map physical to logical */ EXPORT_SYMBOL(__rcid_to_cpu); -void *tidle_ksp[NR_CPUS]; +void *idle_task_pointer[NR_CPUS]; /* State of each CPU */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; @@ -130,7 +130,7 @@ static int secondary_cpu_start(int cpuid, struct task_struct *idle) /* * Precalculate the target ksp. */ - tidle_ksp[cpuid] = idle->stack + THREAD_SIZE; + idle_task_pointer[cpuid] = idle; DBGS("Starting secondary cpu %d: state 0x%lx\n", cpuid, idle->state); @@ -230,14 +230,13 @@ void __init setup_smp(void) /* * Called by smp_init prepare the secondaries */ -void __init native_smp_prepare_cpus(unsigned int max_cpus) +void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned int cpu; /* Take care of some initial bookkeeping. */ memset(ipi_data, 0, sizeof(ipi_data)); init_cpu_topology(); - current_thread_info()->cpu = 0; store_cpu_topology(smp_processor_id()); numa_add_cpu(smp_processor_id()); @@ -256,14 +255,14 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) pr_info("SMP starting up secondaries.\n"); } -void native_smp_prepare_boot_cpu(void) +void smp_prepare_boot_cpu(void) { int me = smp_processor_id(); per_cpu(cpu_state, me) = CPU_ONLINE; } -int native_vt_cpu_up(unsigned int cpu, struct task_struct *tidle) +int vt_cpu_up(unsigned int cpu, struct task_struct *tidle) { printk("%s: cpu = %d\n", __func__, cpu); @@ -275,10 +274,10 @@ int native_vt_cpu_up(unsigned int cpu, struct task_struct *tidle) } DECLARE_STATIC_KEY_FALSE(use_tc_as_sched_clock); -int native_cpu_up(unsigned int cpu, struct task_struct *tidle) +int __cpu_up(unsigned int cpu, struct task_struct *tidle) { if (is_in_guest()) - return native_vt_cpu_up(cpu, tidle); + return vt_cpu_up(cpu, tidle); wmb(); smp_rcb->ready = 0; @@ -312,7 +311,7 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle) return cpu_online(cpu) ? 
0 : -ENOSYS; } -void __init native_smp_cpus_done(unsigned int max_cpus) +void __init smp_cpus_done(unsigned int max_cpus) { smp_booted = 1; pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); @@ -337,10 +336,18 @@ static void send_ipi_message(const struct cpumask *to_whom, enum ipi_message_typ send_ipi(i, II_II0); } +static void ipi_cpu_stop(int cpu) +{ + local_irq_disable(); + set_cpu_online(cpu, false); + while (1) + wait_for_interrupt(); +} + void handle_ipi(struct pt_regs *regs) { - int this_cpu = smp_processor_id(); - unsigned long *pending_ipis = &ipi_data[this_cpu].bits; + int cpu = smp_processor_id(); + unsigned long *pending_ipis = &ipi_data[cpu].bits; unsigned long ops; mb(); /* Order interrupt and bit testing. */ @@ -365,11 +372,9 @@ void handle_ipi(struct pt_regs *regs) break; case IPI_CPU_STOP: - local_irq_disable(); - asm("halt"); - + ipi_cpu_stop(cpu); default: - pr_crit("Unknown IPI on CPU %d: %lu\n", this_cpu, which); + pr_crit("Unknown IPI on CPU %d: %lu\n", cpu, which); break; } } while (ops); @@ -377,38 +382,46 @@ void handle_ipi(struct pt_regs *regs) mb(); /* Order data access and bit testing. */ } - cpu_data[this_cpu].ipi_count++; + cpu_data[cpu].ipi_count++; } -void native_smp_send_reschedule(int cpu) +void smp_send_reschedule(int cpu) { -#ifdef DEBUG_IPI_MSG - if (cpu == hard_smp_processor_id()) - pr_warn("smp_send_reschedule: Sending IPI to self.\n"); -#endif send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); } +EXPORT_SYMBOL(smp_send_reschedule); -static void native_stop_other_cpus(int wait) +void smp_send_stop(void) { - cpumask_t to_whom; + unsigned long timeout; - cpumask_copy(&to_whom, cpu_possible_mask); - cpumask_clear_cpu(smp_processor_id(), &to_whom); -#ifdef DEBUG_IPI_MSG - if (hard_smp_processor_id() != boot_cpu_id) - pr_warn("smp_send_stop: Not on boot cpu.\n"); -#endif - send_ipi_message(&to_whom, IPI_CPU_STOP); + if (num_online_cpus() > 1) { + cpumask_t mask; + + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + + if (system_state <= SYSTEM_RUNNING) + pr_crit("SMP: stopping secondary CPUs\n"); + send_ipi_message(&mask, IPI_CPU_STOP); + } + + /* Wait up to one second for other CPUs to stop */ + timeout = USEC_PER_SEC; + while (num_online_cpus() > 1 && timeout--) + udelay(1); + if (num_online_cpus() > 1) + pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", + cpumask_pr_args(cpu_online_mask)); } -void native_send_call_func_ipi(const struct cpumask *mask) +void arch_send_call_function_ipi_mask(const struct cpumask *mask) { send_ipi_message(mask, IPI_CALL_FUNC); } -void native_send_call_func_single_ipi(int cpu) +void arch_send_call_function_single_ipi(int cpu) { send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); } @@ -524,20 +537,19 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) } EXPORT_SYMBOL(flush_tlb_kernel_range); -int native_cpu_disable(void) +#ifdef CONFIG_HOTPLUG_CPU +int __cpu_disable(void) { int cpu = smp_processor_id(); set_cpu_online(cpu, false); remove_cpu_topology(cpu); numa_remove_cpu(cpu); -#ifdef CONFIG_HOTPLUG_CPU clear_tasks_mm_cpumask(cpu); -#endif return 0; } -void native_cpu_die(unsigned int cpu) +void __cpu_die(unsigned int cpu) { /* We don't do anything here: idle task is faking death itself. 
*/ unsigned int i; @@ -555,28 +567,20 @@ void native_cpu_die(unsigned int cpu) pr_err("CPU %u didn't die...\n", cpu); } -static void disable_timer(void) -{ - if (is_in_guest()) - hcall(HCALL_SET_CLOCKEVENT, 0, 0, 0); - else - wrtimer(0); -} - -void native_play_dead(void) +void arch_cpu_idle_dead(void) { idle_task_exit(); mb(); __this_cpu_write(cpu_state, CPU_DEAD); -#ifdef CONFIG_HOTPLUG_CPU fixup_irqs(); -#endif local_irq_disable(); - disable_timer(); - - if (is_in_guest()) + if (is_in_guest()) { + hcall(HCALL_SET_CLOCKEVENT, 0, 0, 0); hcall(HCALL_STOP, 0, 0, 0); + } else { + wrtimer(0); + } #ifdef CONFIG_SUSPEND @@ -597,21 +601,4 @@ void native_play_dead(void) asm volatile("halt"); #endif } - -struct smp_ops smp_ops = { - .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, - .smp_prepare_cpus = native_smp_prepare_cpus, - .smp_cpus_done = native_smp_cpus_done, - - .stop_other_cpus = native_stop_other_cpus, - .smp_send_reschedule = native_smp_send_reschedule, - - .cpu_up = native_cpu_up, - .cpu_die = native_cpu_die, - .cpu_disable = native_cpu_disable, - .play_dead = native_play_dead, - - .send_call_func_ipi = native_send_call_func_ipi, - .send_call_func_single_ipi = native_send_call_func_single_ipi, -}; -EXPORT_SYMBOL_GPL(smp_ops); +#endif diff --git a/arch/sw_64/kernel/suspend_asm.S b/arch/sw_64/kernel/suspend_asm.S index 73232de4cf1921d1f76db60a3d6a55f21e54fa91..34ee349515a7c1278f24bc9c64dc3e8a6e864137 100644 --- a/arch/sw_64/kernel/suspend_asm.S +++ b/arch/sw_64/kernel/suspend_asm.S @@ -29,6 +29,7 @@ ENTRY(sw64_suspend_deep_sleep) vstd $f9, CALLEE_F9($1) rfpcr $f0 fstd $f0, PSTATE_FPCR($16) + stl $8, PSTATE_KTP($16) /* save the address of suspend_state to $18 */ mov $16, $18 @@ -53,9 +54,8 @@ $subloop: bis $16, $16, $16 bne $16, $subloop - ldi $8, 0x3fff - bic sp, $8, $8 + ldl $8, PSTATE_KTP($18) ldi $1, PSTATE_REGS($18) ldl $9, CALLEE_R9($1) ldl $10, CALLEE_R10($1) diff --git a/arch/sw_64/kernel/topology.c b/arch/sw_64/kernel/topology.c index 6bad8e13b8c9f1fddedb5c80e1e28e0ab7f4d4db..d1037e33480e1fd36bd0e59575712ae696fc8525 100644 --- a/arch/sw_64/kernel/topology.c +++ b/arch/sw_64/kernel/topology.c @@ -2,7 +2,7 @@ #include #include - +#include #include static int __init parse_dt_topology(void) @@ -16,6 +16,64 @@ static int __init parse_dt_topology(void) struct cpu_topology cpu_topology[NR_CPUS]; EXPORT_SYMBOL_GPL(cpu_topology); +int topo_nr_threads, topo_nr_cores, topo_nr_maxcpus; + +static int topo_nr_cpus; +static int topo_threads[NR_CPUS]; +static int topo_cores[NR_CPUS]; +static int topo_packages[NR_CPUS]; + +static void __init init_topo_threads(void) +{ + int i, j; + + if (topo_nr_threads == 0) + topo_nr_threads = 1; + + for (i = 0; i < topo_nr_cpus; i += topo_nr_threads) { + for (j = 0; j < topo_nr_threads; j++) + topo_threads[i+j] = j; + } +} + +static void __init init_topo_cores(void) +{ + int i, j; + + if (topo_nr_cores == 0) + topo_nr_cores = topo_nr_cpus; + + for (i = 0; i < topo_nr_cpus; i += topo_nr_cores) { + for (j = 0; j < topo_nr_cores; j++) + topo_cores[i+j] = j; + } +} + +static void __init init_topo_packages(void) +{ + int i, j, package_index = 0; + int topo_nr_packages = topo_nr_cpus / (topo_nr_cores * topo_nr_threads); + int div_package = topo_nr_cpus / topo_nr_packages; + + for (i = 0; i < topo_nr_cpus; i += div_package) { + for (j = 0; j < div_package; j++) + topo_packages[i+j] = package_index; + package_index++; + } + if (package_index > topo_nr_packages) + pr_err("topo_packages init failed.\n"); +} + +static void __init init_topology_array(void) +{ + topo_nr_cpus 
= num_present_cpus(); + if (topo_nr_maxcpus > topo_nr_cpus) + topo_nr_cpus = topo_nr_maxcpus; + init_topo_threads(); + init_topo_cores(); + init_topo_packages(); +} + const struct cpumask *cpu_coregroup_mask(int cpu) { return topology_llc_cpumask(cpu); @@ -38,11 +96,11 @@ static void update_siblings_masks(int cpu) if (cpu_topo->package_id == sib_topo->package_id) { cpumask_set_cpu(cpu, &sib_topo->core_sibling); cpumask_set_cpu(sib, &cpu_topo->core_sibling); - } - if (cpu_topo->core_id == sib_topo->core_id) { - cpumask_set_cpu(cpu, &sib_topo->thread_sibling); - cpumask_set_cpu(sib, &cpu_topo->thread_sibling); + if (cpu_topo->core_id == sib_topo->core_id) { + cpumask_set_cpu(cpu, &sib_topo->thread_sibling); + cpumask_set_cpu(sib, &cpu_topo->thread_sibling); + } } } } @@ -54,6 +112,14 @@ void store_cpu_topology(int cpu) if (cpu_topo->package_id != -1) goto topology_populated; + if (is_guest_or_emul()) { + cpu_topo->package_id = topo_packages[cpu]; + cpu_topo->core_id = topo_cores[cpu]; + cpu_topo->thread_id = topo_threads[cpu]; + cpu_topo->llc_id = topo_packages[cpu]; + goto topology_populated; + } + cpu_topo->package_id = rcid_to_package(cpu_to_rcid(cpu)); cpu_topo->core_id = cpu_to_rcid(cpu) & CORE_ID_MASK; cpu_topo->thread_id = (cpu_to_rcid(cpu) >> THREAD_ID_SHIFT) & THREAD_ID_MASK; @@ -126,6 +192,8 @@ void __init init_cpu_topology(void) { reset_cpu_topology(); + if (is_guest_or_emul()) + init_topology_array(); /* * Discard anything that was parsed if we hit an error so we * don't use partial information. diff --git a/arch/sw_64/kernel/traps.c b/arch/sw_64/kernel/traps.c index 252eb23dd032bd9e5cb83c4e2e39b9a8f14094bc..fda9ef61a2e449db0439abc1d6476236d61f60d2 100644 --- a/arch/sw_64/kernel/traps.c +++ b/arch/sw_64/kernel/traps.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include "proto.h" @@ -38,6 +39,7 @@ enum SW64_IF_TYPES { IF_GENTRAP, IF_FEN, IF_OPDEC, + IF_SIMDEMU, }; void show_regs(struct pt_regs *regs) @@ -165,12 +167,32 @@ do_entArith(unsigned long summary, unsigned long write_mask, force_sig_fault(SIGFPE, si_code, (void __user *)regs->pc, 0); } +void simd_emulate(unsigned int inst, unsigned long va) +{ + unsigned long *fp; + int instr_opc, reg; + + instr_opc = (inst >> 26) & 0x3f; + reg = (inst >> 21) & 0x1f; + fp = (unsigned long *) va; + + switch (instr_opc) { + case 0x0d: /* vldd */ + sw64_write_simd_fp_reg_d(reg, fp[0], fp[1], fp[2], fp[3]); + return; + + case 0x0f: /* vstd */ + sw64_read_simd_fp_m_d(reg, fp); + return; + } +} + /* * BPT/GENTRAP/OPDEC make regs->pc = exc_pc + 4. debugger should * do something necessary to handle it correctly. */ asmlinkage void -do_entIF(unsigned long inst_type, struct pt_regs *regs) +do_entIF(unsigned long inst_type, unsigned long va, struct pt_regs *regs) { int signo, code; unsigned int inst, type; @@ -178,6 +200,11 @@ do_entIF(unsigned long inst_type, struct pt_regs *regs) type = inst_type & 0xffffffff; inst = inst_type >> 32; + if (type == IF_SIMDEMU) { + simd_emulate(inst, va); + return; + } + if (!user_mode(regs) && type != IF_OPDEC) { if (type == IF_BREAKPOINT) { /* support kgdb */ @@ -547,6 +574,21 @@ do_entUnaUser(void __user *va, unsigned long opcode, unsigned long fp[4]; unsigned long instr, instr_op, value; +#ifdef CONFIG_DEBUG_FS + /* + * If command name is specified, record some information + * to debugfs. 
+ */ + if (unaligned_task[0] && !strcmp(unaligned_task, current->comm)) { + int idx; + + idx = unaligned_count % UNA_MAX_ENTRIES; + unaligned[idx].va = (unsigned long)va; + unaligned[idx].pc = regs->pc; + unaligned_count++; + } +#endif + /* Check the UAC bits to decide what the user wants us to do * with the unaliged access. */ @@ -1459,5 +1501,7 @@ trap_init(void) wrent(entIF, 3); wrent(entUna, 4); wrent(entSys, 5); +#ifdef CONFIG_EFI wrent((void *)entSuspend, 6); +#endif } diff --git a/arch/sw_64/kernel/unaligned.c b/arch/sw_64/kernel/unaligned.c new file mode 100644 index 0000000000000000000000000000000000000000..40a17fb9cbd2c7e0ac75041644e2bd46ff30a370 --- /dev/null +++ b/arch/sw_64/kernel/unaligned.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include +#include +#include + +unsigned long unaligned_count; +char unaligned_task[TASK_COMM_LEN]; +struct unaligned_stat unaligned[UNA_MAX_ENTRIES]; + +static ssize_t unaligned_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + size_t size; + + unaligned_count = 0; + size = min(sizeof(unaligned_task), len); + if (copy_from_user(unaligned_task, user_buf, size)) + return -EFAULT; + unaligned_task[size - 1] = '\0'; + + return len; +} + +static int unaligned_show(struct seq_file *m, void *v) +{ + int i, idx, nr; + + if (!unaligned_task[0]) { + seq_puts(m, "No task traced\n"); + return 0; + } + seq_printf(m, "Task command:\t\t%s\n", unaligned_task); + seq_printf(m, "Unaligned count:\t%ld\n", unaligned_count); + if (!unaligned_count) + return 0; + nr = 0; + idx = unaligned_count % UNA_MAX_ENTRIES; + seq_printf(m, "Latest %d unaligned stat:\nNo.\tVA\t\tPC\n", UNA_MAX_ENTRIES); + if (unaligned_count >= UNA_MAX_ENTRIES) { + for (i = idx; i < UNA_MAX_ENTRIES; i++) + seq_printf(m, "%d\t%#lx\t%#lx\n", + nr++, unaligned[i].va, unaligned[i].pc); + } + for (i = 0; i < idx; i++) + seq_printf(m, "%d\t%#lx\t%#lx\n", + nr++, unaligned[i].va, unaligned[i].pc); + return 0; +} + +static int unaligned_open(struct inode *inode, struct file *file) +{ + return single_open(file, unaligned_show, NULL); +} + +static const struct file_operations unaligned_fops = { + .read = seq_read, + .write = unaligned_set, + .open = unaligned_open, + .llseek = default_llseek, +}; + +static int __init unaligned_init(void) +{ + struct dentry *unaligned; + + if (!sw64_debugfs_dir) + return -ENODEV; + + unaligned = debugfs_create_file("unaligned", 0644, + sw64_debugfs_dir, NULL, + &unaligned_fops); + if (!unaligned) + return -ENOMEM; + + return 0; +} + +late_initcall(unaligned_init); diff --git a/arch/sw_64/kvm/entry.S b/arch/sw_64/kvm/entry.S index a331709320ca042fa599c750966bff284d0d126b..a61ecc387d260497bd414b588a8fac00d67d8bbe 100644 --- a/arch/sw_64/kvm/entry.S +++ b/arch/sw_64/kvm/entry.S @@ -18,22 +18,22 @@ */ ENTRY(__sw64_vcpu_run) /* save host fpregs */ - ldl $1, TI_TASK($8) rfpcr $f0 - fstd $f0, TASK_THREAD_FPCR($1) - vstd $f2, TASK_THREAD_F2($1) - vstd $f3, TASK_THREAD_F3($1) - vstd $f4, TASK_THREAD_F4($1) - vstd $f5, TASK_THREAD_F5($1) - vstd $f6, TASK_THREAD_F6($1) - vstd $f7, TASK_THREAD_F7($1) - vstd $f8, TASK_THREAD_F8($1) - vstd $f9, TASK_THREAD_F9($1) + fstd $f0, TASK_THREAD_FPCR($8) + vstd $f2, TASK_THREAD_F2($8) + vstd $f3, TASK_THREAD_F3($8) + vstd $f4, TASK_THREAD_F4($8) + vstd $f5, TASK_THREAD_F5($8) + vstd $f6, TASK_THREAD_F6($8) + vstd $f7, TASK_THREAD_F7($8) + vstd $f8, TASK_THREAD_F8($8) + vstd $f9, TASK_THREAD_F9($8) ldi sp, -VCPU_RET_SIZE(sp) /* save host pt_regs to current kernel stack */ ldi sp, 
-PT_REGS_SIZE(sp) stl $9, PT_REGS_R9(sp) + stl $8, PT_REGS_R8(sp) stl $10, PT_REGS_R10(sp) stl $11, PT_REGS_R11(sp) stl $12, PT_REGS_R12(sp) @@ -198,6 +198,7 @@ $g_setfpec_over: stl $28, KVM_REGS_R28($17) /* restore host regs from host sp */ + ldl $8, PT_REGS_R8(sp) ldl $9, PT_REGS_R9(sp) ldl $10, PT_REGS_R10(sp) ldl $11, PT_REGS_R11(sp) @@ -208,11 +209,8 @@ $g_setfpec_over: ldl $26, PT_REGS_R26(sp) ldi sp, PT_REGS_SIZE(sp) - ldi $8, 0x3fff - bic sp, $8, $8 /* restore host fpregs */ - ldl $1, TI_TASK($8) - fldd $f0, TASK_THREAD_FPCR($1) + fldd $f0, TASK_THREAD_FPCR($8) wfpcr $f0 fimovd $f0, $2 and $2, 0x3, $2 @@ -232,14 +230,14 @@ $setfpec_1: $setfpec_2: setfpec2 $setfpec_over: - vldd $f2, TASK_THREAD_F2($1) - vldd $f3, TASK_THREAD_F3($1) - vldd $f4, TASK_THREAD_F4($1) - vldd $f5, TASK_THREAD_F5($1) - vldd $f6, TASK_THREAD_F6($1) - vldd $f7, TASK_THREAD_F7($1) - vldd $f8, TASK_THREAD_F8($1) - vldd $f9, TASK_THREAD_F9($1) + vldd $f2, TASK_THREAD_F2($8) + vldd $f3, TASK_THREAD_F3($8) + vldd $f4, TASK_THREAD_F4($8) + vldd $f5, TASK_THREAD_F5($8) + vldd $f6, TASK_THREAD_F6($8) + vldd $f7, TASK_THREAD_F7($8) + vldd $f8, TASK_THREAD_F8($8) + vldd $f9, TASK_THREAD_F9($8) /* if $0 > 0, handle hcall */ bgt $0, $ret_to diff --git a/arch/sw_64/kvm/handle_exit.c b/arch/sw_64/kvm/handle_exit.c index 8304ebfcd5c69ee3d08a3a28a45fa883ef40a607..5d14f2a22f1fadea8b0879d491ff75e9eaf03e2b 100644 --- a/arch/sw_64/kvm/handle_exit.c +++ b/arch/sw_64/kvm/handle_exit.c @@ -17,42 +17,58 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, switch (exception_index) { case SW64_KVM_EXIT_IO: + vcpu->stat.io_exits++; return io_mem_abort(vcpu, run, hargs); case SW64_KVM_MIGRATION_SET_DIRTY_HM: case SW64_KVM_MIGRATION_SET_DIRTY: + vcpu->stat.migration_set_dirty++; gfn = hargs->arg2 >> 24; mutex_lock(&vcpu->kvm->slots_lock); kvm_vcpu_mark_page_dirty(vcpu, gfn); mutex_unlock(&vcpu->kvm->slots_lock); return 1; case SW64_KVM_EXIT_HALT: + vcpu->stat.halt_exits++; vcpu->arch.halted = 1; kvm_vcpu_block(vcpu); return 1; case SW64_KVM_EXIT_SHUTDOWN: + vcpu->stat.shutdown_exits++; vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SHUTDOWN; return 0; case SW64_KVM_EXIT_RESTART: + vcpu->stat.restart_exits++; vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; return 0; + case SW64_KVM_EXIT_STOP: + vcpu->stat.stop_exits++; + vcpu->arch.halted = 1; + memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending)); + kvm_vcpu_block(vcpu); + return 1; case SW64_KVM_EXIT_TIMER: + vcpu->stat.timer_exits++; set_timer(vcpu, hargs->arg0); return 1; case SW64_KVM_EXIT_IPI: + vcpu->stat.ipi_exits++; vcpu_send_ipi(vcpu, hargs->arg0); return 1; case SW64_KVM_EXIT_DEBUG: + vcpu->stat.debug_exits++; vcpu->run->exit_reason = KVM_EXIT_DEBUG; vcpu->run->debug.arch.epc = vcpu->arch.regs.pc; return 0; #ifdef CONFIG_KVM_MEMHOTPLUG case SW64_KVM_EXIT_MEMHOTPLUG: + vcpu->stat.memhotplug_exits++; vcpu_mem_hotplug(vcpu, hargs->arg0); return 1; #endif case SW64_KVM_EXIT_FATAL_ERROR: + vcpu->stat.fatal_error_exits++; printk("Guest fatal error: Reason=[%lx], EXC_PC=[%lx], DVA=[%lx]", hargs->arg0, hargs->arg1, hargs->arg2); vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; vcpu->run->hw.hardware_exit_reason = hargs->arg0; diff --git a/arch/sw_64/kvm/kvm-sw64.c b/arch/sw_64/kvm/kvm-sw64.c index 0f0fa9b586cccbfd00405338fe8f4fbf02b87e41..122fb02957ce709267a171f9134e9384d5746ae3 100644 --- a/arch/sw_64/kvm/kvm-sw64.c +++ b/arch/sw_64/kvm/kvm-sw64.c @@ -13,6 +13,7 
@@ #include #include #include +#include #include #include @@ -38,6 +39,13 @@ extern bool bind_vcpu_enabled; #define HARDWARE_VPN_MASK ((1UL << WIDTH_HARDWARE_VPN) - 1) #define VPN_SHIFT (64 - WIDTH_HARDWARE_VPN) +#define VCPU_STAT(n, x, ...) \ + { n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ } +#define VM_STAT(n, x, ...) \ + { n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ } +#define DFX_STAT(n, x, ...) \ + { n, offsetof(struct kvm_vcpu_stat, x), DFX_STAT_U64, ## __VA_ARGS__ } + static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu); static void kvm_set_running_vcpu(struct kvm_vcpu *vcpu) @@ -55,11 +63,13 @@ int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) { - int irq = e->msi.data & 0xff; + unsigned int vcid; unsigned int vcpu_idx; struct kvm_vcpu *vcpu = NULL; + int irq = e->msi.data & 0xff; - vcpu_idx = irq % atomic_read(&kvm->online_vcpus); + vcid = (e->msi.address_lo & VT_MSIX_ADDR_DEST_ID_MASK) >> VT_MSIX_ADDR_DEST_ID_SHIFT; + vcpu_idx = vcid & 0x1f; vcpu = kvm_get_vcpu(kvm, vcpu_idx); if (!vcpu) @@ -149,6 +159,52 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu) } struct kvm_stats_debugfs_item debugfs_entries[] = { + VCPU_STAT("exits", exits), + VCPU_STAT("io_exits", io_exits), + VCPU_STAT("mmio_exits", mmio_exits), + VCPU_STAT("migration_set_dirty", migration_set_dirty), + VCPU_STAT("shutdown_exits", shutdown_exits), + VCPU_STAT("restart_exits", restart_exits), + VCPU_STAT("ipi_exits", ipi_exits), + VCPU_STAT("timer_exits", timer_exits), + VCPU_STAT("debug_exits", debug_exits), +#ifdef CONFIG_KVM_MEMHOTPLUG + VCPU_STAT("memhotplug_exits", memhotplug_exits), +#endif + VCPU_STAT("fatal_error_exits", fatal_error_exits), + VCPU_STAT("halt_exits", halt_exits), + VCPU_STAT("halt_successful_poll", halt_successful_poll), + VCPU_STAT("halt_attempted_poll", halt_attempted_poll), + VCPU_STAT("halt_wakeup", halt_wakeup), + VCPU_STAT("halt_poll_invalid", halt_poll_invalid), + VCPU_STAT("signal_exits", signal_exits), + { "vcpu_stat", 0, KVM_STAT_DFX }, + { NULL } +}; + +struct dfx_kvm_stats_debugfs_item dfx_debugfs_entries[] = { + DFX_STAT("pid", pid), + DFX_STAT("exits", exits), + DFX_STAT("io_exits", io_exits), + DFX_STAT("mmio_exits", mmio_exits), + DFX_STAT("migration_set_dirty", migration_set_dirty), + DFX_STAT("shutdown_exits", shutdown_exits), + DFX_STAT("restart_exits", restart_exits), + DFX_STAT("ipi_exits", ipi_exits), + DFX_STAT("timer_exits", timer_exits), + DFX_STAT("debug_exits", debug_exits), + DFX_STAT("fatal_error_exits", fatal_error_exits), + DFX_STAT("halt_exits", halt_exits), + DFX_STAT("halt_successful_poll", halt_successful_poll), + DFX_STAT("halt_attempted_poll", halt_attempted_poll), + DFX_STAT("halt_wakeup", halt_wakeup), + DFX_STAT("halt_poll_invalid", halt_poll_invalid), + DFX_STAT("signal_exits", signal_exits), + DFX_STAT("steal", steal), + DFX_STAT("st_max", st_max), + DFX_STAT("utime", utime), + DFX_STAT("stime", stime), + DFX_STAT("gtime", gtime), { NULL } }; @@ -457,6 +513,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* Set up the timer for Guest */ pr_info("vcpu: [%d], regs addr = %#lx, vcpucb = %#lx\n", vcpu->vcpu_id, (unsigned long)&vcpu->arch.regs, (unsigned long)&vcpu->arch.vcb); + vcpu->arch.vtimer_freq = cpuid(GET_CPU_FREQ, 0) * 1000UL * 1000UL; hrtimer_init(&vcpu->arch.hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS); vcpu->arch.hrt.function = clockdev_fn; 
vcpu->arch.tsk = current; @@ -521,10 +578,27 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) return 0; } +void kvm_arch_vcpu_stat_reset(struct kvm_vcpu_stat *vcpu_stat) +{ + vcpu_stat->st_max = 0; +} + +static void update_steal_time(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_SCHED_INFO + u64 delta; + + delta = current->sched_info.run_delay - vcpu->stat.steal; + vcpu->stat.steal = current->sched_info.run_delay; + vcpu->stat.st_max = max(vcpu->stat.st_max, delta); +#endif +} + void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { vcpu->cpu = cpu; kvm_set_running_vcpu(vcpu); + update_steal_time(vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -586,6 +660,13 @@ void _debug_printk_vcpu(struct kvm_vcpu *vcpu) pc, pc_phys, insn, ra, vcpu_get_reg(vcpu, ra)); } +static void update_vcpu_stat_time(struct kvm_vcpu_stat *vcpu_stat) +{ + vcpu_stat->utime = current->utime; + vcpu_stat->stime = current->stime; + vcpu_stat->gtime = current->gtime; +} + /* * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on * proper exit to userspace. @@ -651,6 +732,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) if (signal_pending(current)) { ret = -EINTR; run->exit_reason = KVM_EXIT_INTR; + vcpu->stat.signal_exits++; } if (ret <= 0) { @@ -681,6 +763,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) /* Back from guest */ vcpu->mode = OUTSIDE_GUEST_MODE; + vcpu->stat.exits++; local_irq_enable(); guest_exit_irqoff(); @@ -690,6 +773,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) /* ret = 0 indicate interrupt in guest mode, ret > 0 indicate hcall */ ret = handle_exit(vcpu, run, ret, &hargs); + update_vcpu_stat_time(&vcpu->stat); } if (vcpu->sigset_active) @@ -713,7 +797,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp, return kvm_arch_vcpu_reset(vcpu); case KVM_SW64_GET_VCB: if (vcpu->arch.vcb.migration_mark) { - result = sw64_io_read(0, LONG_TIME); + result = sw64_io_read(0, LONG_TIME) + + vcpu->arch.vcb.guest_longtime_offset; vcpu->arch.vcb.guest_longtime = result; vcpu->arch.vcb.guest_irqs_pending = vcpu->arch.irqs_pending[0]; } @@ -880,14 +965,13 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, bool line_status) { u32 irq = irq_level->irq; - unsigned int vcpu_idx, irq_num; + unsigned int irq_num; struct kvm_vcpu *vcpu = NULL; bool level = irq_level->level; - vcpu_idx = irq % atomic_read(&kvm->online_vcpus); irq_num = irq; - - vcpu = kvm_get_vcpu(kvm, vcpu_idx); + /* target core for Intx is core0 */ + vcpu = kvm_get_vcpu(kvm, 0); if (!vcpu) return -EINVAL; diff --git a/arch/sw_64/kvm/kvm_timer.c b/arch/sw_64/kvm/kvm_timer.c index fea819732af5fc2df15426793bb00942a20c12c5..895be63cd8d132b316b02388764744d863c36131 100644 --- a/arch/sw_64/kvm/kvm_timer.c +++ b/arch/sw_64/kvm/kvm_timer.c @@ -16,7 +16,7 @@ * timer interrupt. * * The Guest uses the LHCALL_SET_CLOCKEVENT hypercall to tell us how long to - * the next timer interrupt (in nanoseconds). We use the high-resolution timer + * the next timer interrupt (in ticks). We use the high-resolution timer * infrastructure to set a callback at that time. * * 0 means "turn off the clock". @@ -31,6 +31,11 @@ void set_timer(struct kvm_vcpu *vcpu, unsigned long delta) hrtimer_cancel(&vcpu->arch.hrt); return; } + + /* Convert clock event device ticks to nanoseconds */ + delta = delta * NSEC_PER_SEC; + do_div(delta, vcpu->arch.vtimer_freq); + /* * We use wallclock time here, so the Guest might not be running for * all the time between now and the timer interrupt it asked for. 
This diff --git a/arch/sw_64/kvm/mmio.c b/arch/sw_64/kvm/mmio.c index 340486e8e51bb380ea719cbbb97b46386114d831..fe6ae6f5ed5c497b6d7daef7cbe39fc250498fe8 100644 --- a/arch/sw_64/kvm/mmio.c +++ b/arch/sw_64/kvm/mmio.c @@ -52,6 +52,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data); } + vcpu->stat.mmio_exits++; vcpu->arch.regs.pc += 4; return 0; diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index 93ec3ecdf4f1a6593dd22fab13cb3ef674a47b54..76104c09730907f0bd0fd511c1abad5157b9786e 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -12,6 +12,7 @@ #include #include +#include #include struct mem_desc_t mem_desc; @@ -97,7 +98,7 @@ void __init callback_init(void) /* Allocate one PGD and one PUD. */ pgd = pgd_offset_k(VMALLOC_START); p4d = p4d_offset(pgd, VMALLOC_START); - p4d_set(p4d, (pud_t *)vmalloc_pud); + p4d_populate(&init_mm, p4d, (pud_t *)vmalloc_pud); } void __init zone_sizes_init(void) diff --git a/arch/sw_64/net/bpf_jit_comp.c b/arch/sw_64/net/bpf_jit_comp.c index 47e8055691544640d6e51c2c9cdc4a599d965753..a8393598a4bc547c81ee0a08f6cedcdc31db9132 100644 --- a/arch/sw_64/net/bpf_jit_comp.c +++ b/arch/sw_64/net/bpf_jit_comp.c @@ -280,54 +280,12 @@ static void emit_sw64_ldu64(const int dst, const u64 imm, struct jit_ctx *ctx) /* Do not change!!! See arch/sw_64/lib/divide.S for more detail */ #define REG(x) "$"str(x) #define str(x) #x +#define DIV_RET_ADDR 23 #define DIVIDEND 24 #define DIVISOR 25 #define RESULT 27 -/* Make these functions noinline because we need their address at runtime */ -noinline void sw64_bpf_jit_helper_div32(void) -{ - register u32 __dividend asm(REG(DIVIDEND)); - register u32 __divisor asm(REG(DIVISOR)); - u32 res = __dividend / __divisor; - - asm volatile( - "" - :: "r"(res)); -} - -noinline void sw64_bpf_jit_helper_mod32(void) -{ - register u32 __dividend asm(REG(DIVIDEND)); - register u32 __divisor asm(REG(DIVISOR)); - u32 res = __dividend % __divisor; - - asm volatile( - "" - :: "r"(res)); -} - -noinline void sw64_bpf_jit_helper_div64(void) -{ - register u64 __dividend asm(REG(DIVIDEND)); - register u64 __divisor asm(REG(DIVISOR)); - u64 res = __dividend / __divisor; - - asm volatile( - "" - :: "r"(res)); -} - -noinline void sw64_bpf_jit_helper_mod64(void) -{ - register u64 __dividend asm(REG(DIVIDEND)); - register u64 __divisor asm(REG(DIVISOR)); - u64 res = __dividend % __divisor; - - asm volatile( - "" - :: "r"(res)); -} +#include static void emit_sw64_divmod(const int dst, const int src, struct jit_ctx *ctx, u8 code) { emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, dst, DIVIDEND), ctx); @@ -336,25 +294,25 @@ static void emit_sw64_divmod(const int dst, const int src, struct jit_ctx *ctx, case BPF_ALU: switch (BPF_OP(code)) { case BPF_DIV: - emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)sw64_bpf_jit_helper_div32, ctx); + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__divwu, ctx); break; case BPF_MOD: - emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)sw64_bpf_jit_helper_mod32, ctx); + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__remwu, ctx); break; } - emit(SW64_BPF_CALL(SW64_BPF_REG_RA, SW64_BPF_REG_PV), ctx); + emit(SW64_BPF_CALL(DIV_RET_ADDR, SW64_BPF_REG_PV), ctx); emit(SW64_BPF_ZAP_IMM(RESULT, 0xf0, dst), ctx); break; case BPF_ALU64: switch (BPF_OP(code)) { case BPF_DIV: - emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)sw64_bpf_jit_helper_div64, ctx); + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__divlu, ctx); break; case BPF_MOD: - emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)sw64_bpf_jit_helper_mod64, ctx); + 
emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__remlu, ctx); break; } - emit(SW64_BPF_CALL(SW64_BPF_REG_RA, SW64_BPF_REG_PV), ctx); + emit(SW64_BPF_CALL(DIV_RET_ADDR, SW64_BPF_REG_PV), ctx); emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, RESULT, dst), ctx); break; } diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 121299e4e72fcbb7be9f48f1ff26d2388f9cbfa0..3d08cce7c62f3d5076eddc0265af4a87ebd4ceb3 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -323,6 +323,20 @@ config SH_CPU_FREQ If unsure, say N. endif +if SW64 +config SW64_CPUFREQ + bool "sw64 CPU Frequency interface for Chip3 Asic" + depends on SW64_CHIP3 + default y + help + This adds the CPUFreq driver for SW6B processor which supports + software configurable cpu frequency. + + For details, take a look at . + + If unsure, say N. +endif + config QORIQ_CPUFREQ tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" depends on OF && COMMON_CLK diff --git a/drivers/cpufreq/sw64_cpufreq.c b/drivers/cpufreq/sw64_cpufreq.c index 819d8f1437e284e4da4bd381527f895916426843..9259753c8f061e110cf4e81ae39fb3b384db9f29 100644 --- a/drivers/cpufreq/sw64_cpufreq.c +++ b/drivers/cpufreq/sw64_cpufreq.c @@ -21,11 +21,40 @@ #include #include +#include + +#define CRYSTAL_BIT (1UL << 34) static uint nowait; static struct clk *cpuclk; +/* Minimum CLK support */ +enum { + DC_0, DC_1, DC_2, DC_3, DC_4, DC_5, DC_6, DC_7, DC_8, + DC_9, DC_10, DC_11, DC_12, DC_13, DC_14, DC_15, DC_RESV +}; + +static struct cpufreq_frequency_table freq_table[] = { + {0, DC_0, CPUFREQ_ENTRY_INVALID}, + {0, DC_1, CPUFREQ_ENTRY_INVALID}, + {0, DC_2, 0}, + {0, DC_3, 0}, + {0, DC_4, 0}, + {0, DC_5, 0}, + {0, DC_6, 0}, + {0, DC_7, 0}, + {0, DC_8, 0}, + {0, DC_9, 0}, + {0, DC_10, 0}, + {0, DC_11, 0}, + {0, DC_12, 0}, + {0, DC_13, 0}, + {0, DC_14, 0}, + {0, DC_15, 0}, + {-1, DC_RESV, CPUFREQ_TABLE_END}, +}; + static int sw64_cpu_freq_notifier(struct notifier_block *nb, unsigned long val, void *data); @@ -37,12 +66,10 @@ static int sw64_cpu_freq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freqs = (struct cpufreq_freqs *)data; - unsigned long cpu; + unsigned long cpu = freqs->policy->cpu; - for_each_online_cpu(cpu) { - if (val == CPUFREQ_POSTCHANGE) - sw64_update_clockevents(cpu, freqs->new * 1000); - } + if (val == CPUFREQ_POSTCHANGE) + sw64_update_clockevents(cpu, freqs->new * 1000000); return 0; } @@ -57,7 +84,7 @@ static unsigned int sw64_cpufreq_get(unsigned int cpu) return 0; } - return __sw64_cpufreq_get(policy) * 1000; + return __sw64_cpufreq_get(policy); } /* @@ -66,22 +93,23 @@ static unsigned int sw64_cpufreq_get(unsigned int cpu) static int sw64_cpufreq_target(struct cpufreq_policy *policy, unsigned int index) { - unsigned long freq; + unsigned int cpu = policy->cpu; - freq = 50000 * index; + if (!cpu_online(cpu)) + return -ENODEV; sw64_store_policy(policy); /* setting the cpu frequency */ - sw64_set_rate(freq * 1000); + sw64_set_rate(index); return 0; } static int sw64_cpufreq_cpu_init(struct cpufreq_policy *policy) { - unsigned long rate; int i; + unsigned long max_rate, freq_off; cpuclk = sw64_clk_get(NULL, "cpu_clk"); if (IS_ERR(cpuclk)) { @@ -89,27 +117,34 @@ static int sw64_cpufreq_cpu_init(struct cpufreq_policy *policy) return PTR_ERR(cpuclk); } - rate = get_cpu_freq() / 1000; + max_rate = get_cpu_freq() / 1000000; - /* clock table init */ - for (i = 0; - (sw64_clockmod_table[i].frequency != CPUFREQ_TABLE_END); - i++) - if (sw64_clockmod_table[i].frequency == 0) - 
sw64_clockmod_table[i].frequency = (rate * i) / 48; + if (sw64_io_read(0, INIT_CTL) & CRYSTAL_BIT) + freq_off = 50; + else + freq_off = 60; - sw64_set_rate(rate * 1000); + /* clock table init */ + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + if (i == 2) + freq_table[i].frequency = freq_off * 36; + if (i > 2) + freq_table[i].frequency = freq_off * 38 + ((i - 3) * freq_off); + + if (freq_table[i].frequency == max_rate) + freq_table[i + 1].frequency = CPUFREQ_TABLE_END; + } policy->clk = cpuclk; - cpufreq_generic_init(policy, &sw64_clockmod_table[0], 0); + cpufreq_generic_init(policy, freq_table, 0); return 0; } static int sw64_cpufreq_verify(struct cpufreq_policy_data *policy) { - return cpufreq_frequency_table_verify(policy, &sw64_clockmod_table[0]); + return cpufreq_frequency_table_verify(policy, freq_table); } static int sw64_cpufreq_exit(struct cpufreq_policy *policy) diff --git a/drivers/firmware/efi/sunway-init.c b/drivers/firmware/efi/sunway-init.c index 9a019a2ed21c7dfecc902c2d47ef0c4303ca45b6..d3af23feb8bb602359888827c34e7d498c72cbea 100644 --- a/drivers/firmware/efi/sunway-init.c +++ b/drivers/firmware/efi/sunway-init.c @@ -24,8 +24,10 @@ #include #include +#include unsigned long entSuspend; +unsigned long bios_version; static int __init is_memory(efi_memory_desc_t *md) { @@ -35,7 +37,8 @@ static int __init is_memory(efi_memory_desc_t *md) } static efi_config_table_type_t arch_tables[] __initdata = { {SMBIOS3_TABLE_GUID, NULL, NULL}, - {SLEEP_ENTRY_GUID, &entSuspend, "SLEEP ENTRY"} + {SLEEP_ENTRY_GUID, &entSuspend, "SLEEP ENTRY"}, + {BIOS_VERSION_GUID, &bios_version, "BIOS VERSION"} }; static int __init uefi_init(u64 efi_system_table) @@ -102,6 +105,10 @@ static int __init uefi_init(u64 efi_system_table) early_memunmap(config_tables, table_size); out: early_memunmap(systab, sizeof(efi_system_table_t)); + + if (!bios_version) + retval = -EINVAL; + return retval; } diff --git a/drivers/iommu/sw64/sunway_iommu.c b/drivers/iommu/sw64/sunway_iommu.c index 580619c6a571e8944130c622fed8e13b17cc78a1..86920f88faacdf97495c7774c0af041a7afd65f5 100644 --- a/drivers/iommu/sw64/sunway_iommu.c +++ b/drivers/iommu/sw64/sunway_iommu.c @@ -75,24 +75,6 @@ struct dma_domain { const struct iommu_ops sunway_iommu_ops; static const struct dma_map_ops sunway_dma_ops; -struct pci_controller *get_hose_from_domain(struct sunway_iommu_domain *sdomain) -{ - struct pci_controller *hose = NULL; - struct sunway_iommu *iommu; - - if (!sdomain) - return NULL; - - iommu = sdomain->iommu; - - if (!iommu) - return NULL; - - hose = iommu->hose_pt; - - return hose; -} - /* flush helpers */ static void piu_flush_all(struct pci_controller *hose) @@ -102,90 +84,50 @@ static void piu_flush_all(struct pci_controller *hose) write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHALL, 0); } -struct sunway_iommu *get_iommu_from_device(struct device *dev) -{ - struct sunway_iommu *iommu; - struct pci_controller *hose; - - hose = to_pci_dev(dev)->sysdata; - iommu = hose->pci_iommu; - - return iommu; -} - -void domain_flush_all(struct sunway_iommu_domain *sdomain) +void dev_flush_dtlb(struct sunway_iommu_domain *sdomain, + struct sunway_iommu_dev *sdev_data) { struct pci_controller *hose; + int devid; - hose = get_hose_from_domain(sdomain); - if (!hose) - return; - - piu_flush_all(hose); -} + list_for_each_entry(sdev_data, &sdomain->dev_list, list) { + hose = sdev_data->pdev->sysdata; + devid = sdev_data->devid; -void domain_flush_ptlb(struct sunway_iommu_domain *sdomain) -{ - struct pci_controller *hose; - - hose 
= get_hose_from_domain(sdomain); - if (!hose) - return; - - write_piu_ior0(hose->node, hose->index, PTLB_FLUSHALL, 0); - write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHALL, 0); + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHDEV, devid); + } } -void dev_flush_dtlb(struct sunway_iommu_domain *sdomain, struct sunway_iommu_dev *sdev_data) +void flush_pcache_by_addr(struct sunway_iommu_domain *sdomain, + unsigned long flush_addr) { struct pci_controller *hose; - u16 devid; + struct sunway_iommu_dev *sdev_data; - hose = get_hose_from_domain(sdomain); - if (!hose) - return; + list_for_each_entry(sdev_data, &sdomain->dev_list, list) { + hose = sdev_data->pdev->sysdata; - devid = sdev_data->devid; - write_piu_ior0(hose->node, hose->index, DTLB_FLUSHDEV, devid); + flush_addr = __pa(flush_addr); + write_piu_ior0(hose->node, hose->index, + PCACHE_FLUSHPADDR, flush_addr); + } } -/* - * This function is designed to support IOMMU code only, - * as it only provides 2 specific types of flush ops - */ -void -flush_device_tlb(struct sunway_iommu_domain *sdomain, - unsigned long flush_addr, unsigned long hflush_addr) +void flush_ptlb_by_addr(struct sunway_iommu_domain *sdomain, + unsigned long flush_addr) { struct pci_controller *hose; struct pci_dev *pdev; struct sunway_iommu_dev *sdev_data; - hose = get_hose_from_domain(sdomain); - if (!hose) - return; - - switch (hflush_addr) { - case PCACHE_FLUSHPADDR: - flush_addr = __pa(flush_addr) & 0xffffffff80; - /* Set memory bar here */ - mb(); - write_piu_ior0(hose->node, hose->index, - hflush_addr, flush_addr); - break; + list_for_each_entry(sdev_data, &sdomain->dev_list, list) { + pdev = sdev_data->pdev; + hose = pdev->sysdata; - case PTLB_FLUSHVADDR: - list_for_each_entry(sdev_data, &sdomain->dev_list, list) { - pdev = sdev_data->pdev; - flush_addr = (pdev->bus->number << 8) + flush_addr = (pdev->bus->number << 8) | pdev->devfn | (flush_addr << 16); - write_piu_ior0(hose->node, hose->index, - hflush_addr, flush_addr); - } - break; - - default: - break; + write_piu_ior0(hose->node, hose->index, + PTLB_FLUSHVADDR, flush_addr); } } @@ -367,8 +309,7 @@ set_dte_entry(struct sunway_iommu_dev *sdev, struct sunway_iommu_domain *sdomain return; sdev->devid = PCI_DEVID(pdev->bus->number, pdev->devfn); - iommu = sdomain->iommu; - sdev->iommu = iommu; + iommu = sdev->iommu; dte_l1 = iommu->iommu_dtbr + (pdev->bus->number); dte_l1_val = *dte_l1; @@ -416,7 +357,7 @@ static void do_detach(struct sunway_iommu_dev *sdev_data) sdomain->dev_cnt--; pr_debug("iommu: device %d detached from domain %d\n", - sdev_data->devid, sdomain->id); + sdev_data->devid, sdomain->id); } static int @@ -815,6 +756,17 @@ struct syscore_ops iommu_cpu_syscore_ops = { * ******************************************************************************/ +struct sunway_iommu *get_first_iommu_from_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct sunway_iommu_dev *entry; + + entry = list_first_entry(&sdomain->dev_list, struct sunway_iommu_dev, list); + iommu = entry->iommu; + + return iommu; +} + static unsigned long sunway_iommu_unmap_page(struct sunway_iommu_domain *sunway_domain, unsigned long iova, unsigned long page_size) @@ -829,8 +781,8 @@ sunway_iommu_unmap_page(struct sunway_iommu_domain *sunway_domain, pte_l2 = (unsigned long *)fetch_pte(sunway_domain, iova, PTE_LEVEL2); *pte_l2 = 0; - flush_device_tlb(sunway_domain, (unsigned long)pte_l2, PCACHE_FLUSHPADDR); - flush_device_tlb(sunway_domain, (iova >> PAGE_SHIFT), PTLB_FLUSHVADDR); + 
flush_pcache_by_addr(sunway_domain, (unsigned long)pte_l2); + flush_ptlb_by_addr(sunway_domain, (iova >> PAGE_SHIFT)); iova += PAGE_SIZE; unmapped += PAGE_SIZE; @@ -869,7 +821,7 @@ int sunway_iommu_map_page(struct sunway_iommu_domain *sunway_domain, goto direct_map; } - iommu = sunway_domain->iommu; + iommu = get_first_iommu_from_domain(sunway_domain); if (!iommu) return -1; page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); @@ -893,7 +845,7 @@ int sunway_iommu_map_page(struct sunway_iommu_domain *sunway_domain, ptebaseaddr += (iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3; free_page((unsigned long)ptebasecond); } else { - flush_device_tlb(sunway_domain, pdebaseaddr, PCACHE_FLUSHPADDR); + flush_pcache_by_addr(sunway_domain, pdebaseaddr); ptebaseaddr = (unsigned long)ptebasecond + ((iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3); } @@ -909,8 +861,7 @@ int sunway_iommu_map_page(struct sunway_iommu_domain *sunway_domain, pte = (paddr & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID | SW64_IOMMU_GRN_8K | SW64_IOMMU_ENABLE; *(volatile u64 *)ptebaseaddr = pte; - flush_device_tlb(sunway_domain, ptebaseaddr, - PCACHE_FLUSHPADDR); + flush_pcache_by_addr(sunway_domain, ptebaseaddr); /* case 8M */ } else if (page_size == (1UL << PAGE_8M_SHIFT)) { unsigned long *ptr; @@ -933,13 +884,12 @@ int sunway_iommu_map_page(struct sunway_iommu_domain *sunway_domain, /* just do once flush per cache line */ if (i % ptes_one_cache == (ptes_one_cache - 1)) - flush_device_tlb(sunway_domain, (unsigned long)ptr, PCACHE_FLUSHPADDR); + flush_pcache_by_addr(sunway_domain, (unsigned long)ptr); ptr++; } } #ifdef CONFIG_SW64_GUEST - flush_device_tlb(sunway_domain, pfn | SW64_IOMMU_MAP_FLAG, - PTLB_FLUSHVADDR); + flush_ptlb_by_addr(sunway_domain, pfn | SW64_IOMMU_MAP_FLAG); #endif return 0; } @@ -1485,9 +1435,6 @@ static int sunway_iommu_attach_device(struct iommu_domain *dom, struct device *d if (!hose->iommu_enable) return -EINVAL; - if (!sdomain->iommu) - sdomain->iommu = hose->pci_iommu; - sdev_data = dev_iommu_priv_get(dev); if (!sdev_data) return -EINVAL; @@ -1592,6 +1539,7 @@ static struct iommu_group *sunway_iommu_device_group(struct device *dev) static int iommu_init_device(struct device *dev) { struct sunway_iommu_dev *sdev; + struct sunway_iommu *iommu; struct pci_dev *pdev; struct pci_controller *hose; @@ -1604,8 +1552,10 @@ static int iommu_init_device(struct device *dev) pdev = to_pci_dev(dev); hose = pdev->sysdata; + iommu = hose->pci_iommu; llist_add(&sdev->dev_data_list, &dev_data_list); sdev->pdev = pdev; + sdev->iommu = iommu; dev_iommu_priv_set(dev, sdev); diff --git a/drivers/iommu/sw64/sunway_iommu.h b/drivers/iommu/sw64/sunway_iommu.h index bc9e13466f0697843f33a10227f588b067179109..52d6452fa14c93b2cfb0fd9aee7f1cb5a88e6b96 100644 --- a/drivers/iommu/sw64/sunway_iommu.h +++ b/drivers/iommu/sw64/sunway_iommu.h @@ -48,7 +48,6 @@ struct sunway_iommu_domain { struct iommu_domain domain; /* IOMMU domain handle */ unsigned long *pt_root; /* Page Table root */ unsigned int dev_cnt; /* Number of devices in this domain */ - struct sunway_iommu *iommu; }; struct sw64dev_table_entry { diff --git a/drivers/mfd/lpc_sunway_chip3.c b/drivers/mfd/lpc_sunway_chip3.c index 878aff87c99299730cd09f436a0a8e377f6324cd..b36029a79d3b65c1b2e34492e189fa4daeef00bb 100644 --- a/drivers/mfd/lpc_sunway_chip3.c +++ b/drivers/mfd/lpc_sunway_chip3.c @@ -193,6 +193,7 @@ static int lpc_chip3_probe(struct platform_device *pdev) return -ENOMEM; } + platform_set_drvdata(pdev, lpc_adapter); /* Get basic io resource and map it */ mem = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { @@ -251,10 +252,57 @@ static const struct of_device_id chip3_lpc_of_match[] = { MODULE_DEVICE_TABLE(of, chip3_lpc_of_match); +#ifdef CONFIG_PM_SLEEP +unsigned int lpc_irq_ctrl_value; +unsigned int lpc_irq_irq_value; +unsigned int lpc_irq_mask_value; + +/** + * chip3_lpc_platform_suspend - Suspend a chip3_lpc platform device + * @dev: the platform device to suspend + * + * This function saves the LPC controller register values so they + * can be restored when the machine wakes up. + */ +int chip3_lpc_platform_suspend(struct device *dev) +{ + struct lpc_chip3_adapter *lpc_adapter = dev_get_drvdata(dev); + + lpc_irq_ctrl_value = lpc_readl(lpc_adapter->hst_regs, LPC_CTL); + lpc_irq_irq_value = lpc_readl(lpc_adapter->hst_regs, LPC_IRQ); + lpc_irq_mask_value = lpc_readl(lpc_adapter->hst_regs, LPC_IRQ_MASK); + + return 0; +} + +/** + * chip3_lpc_platform_resume - Resume a chip3_lpc platform device + * @dev: the platform device to resume + * + * This function restores the register values saved at suspend time. + */ +int chip3_lpc_platform_resume(struct device *dev) +{ + struct lpc_chip3_adapter *lpc_adapter = dev_get_drvdata(dev); + + lpc_writel(lpc_adapter->hst_regs, LPC_CTL, lpc_irq_ctrl_value); + lpc_writel(lpc_adapter->hst_regs, LPC_IRQ, lpc_irq_irq_value); + lpc_writel(lpc_adapter->hst_regs, LPC_IRQ_MASK, lpc_irq_mask_value); + + return 0; +} +static SIMPLE_DEV_PM_OPS(chip3_lpc_pm_ops, chip3_lpc_platform_suspend, + chip3_lpc_platform_resume); +#endif + + static struct platform_driver chip3_lpc_platform_driver = { .driver = { .name = "chip3_lpc", .of_match_table = chip3_lpc_of_match, +#ifdef CONFIG_PM_SLEEP + .pm = &chip3_lpc_pm_ops, +#endif }, .remove = lpc_chip3_remove, }; diff --git a/drivers/platform/sw64/Makefile b/drivers/platform/sw64/Makefile index 8d166464e4c98b335292782977e9554c76d8442b..28922224fb1763030b979bb8cd2ce204d41dfd80 100644 --- a/drivers/platform/sw64/Makefile +++ b/drivers/platform/sw64/Makefile @@ -1,2 +1,2 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_LEGACY_XUELANG) += legacy_xuelang.o +obj-$(CONFIG_PLATFORM_XUELANG) += legacy_xuelang.o diff --git a/drivers/platform/sw64/legacy_xuelang.c b/drivers/platform/sw64/legacy_xuelang.c index 803bea9467301bcf309390c6df1d03d72a06e764..8a63d9edf9f230a6137c28e641c966aa5b72d16b 100644 --- a/drivers/platform/sw64/legacy_xuelang.c +++ b/drivers/platform/sw64/legacy_xuelang.c @@ -51,10 +51,13 @@ void sw64_restart(void) static int sw64_reset_init(void) { +#ifdef CONFIG_EFI + if (BIOS_SUPPORT_RESET_CLALLBACK((void *)bios_version)) + return 0; +#endif pm_restart = sw64_restart; pm_power_off = sw64_poweroff; pm_halt = sw64_halt; - return 0; } subsys_initcall(sw64_reset_init); diff --git a/tools/arch/sw_64/include/uapi/asm/perf_regs.h b/tools/arch/sw_64/include/uapi/asm/perf_regs.h index 426ae642fcc8505ce0c9815e6b91ed2b56630116..892be52610265f85cab2fff62d256c093e5593ff 100644 --- a/tools/arch/sw_64/include/uapi/asm/perf_regs.h +++ b/tools/arch/sw_64/include/uapi/asm/perf_regs.h @@ -13,6 +13,16 @@ enum perf_event_sw64_regs { PERF_REG_SW64_R6, PERF_REG_SW64_R7, PERF_REG_SW64_R8, + PERF_REG_SW64_R9, + PERF_REG_SW64_R10, + PERF_REG_SW64_R11, + PERF_REG_SW64_R12, + PERF_REG_SW64_R13, + PERF_REG_SW64_R14, + PERF_REG_SW64_R15, + PERF_REG_SW64_R16, + PERF_REG_SW64_R17, + PERF_REG_SW64_R18, PERF_REG_SW64_R19, PERF_REG_SW64_R20, PERF_REG_SW64_R21, @@ -23,16 +33,9 @@ enum perf_event_sw64_regs { PERF_REG_SW64_R26, PERF_REG_SW64_R27, PERF_REG_SW64_R28, - PERF_REG_SW64_HAE, - 
PERF_REG_SW64_TRAP_A0, - PERF_REG_SW64_TRAP_A1, - PERF_REG_SW64_TRAP_A2, - PERF_REG_SW64_PS, - PERF_REG_SW64_PC, PERF_REG_SW64_GP, - PERF_REG_SW64_R16, - PERF_REG_SW64_R17, - PERF_REG_SW64_R18, + PERF_REG_SW64_SP, + PERF_REG_SW64_PC, PERF_REG_SW64_MAX, }; #endif /* _ASM_SW64_PERF_REGS_H */ diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index dc534a9b1a779551af2296db15363518af27569d..401f6cf22a6653505ee277642c1c181b7c6db3c7 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 +ARCH ?= $(shell uname -m) FILES= \ test-all.bin \ test-backtrace.bin \ @@ -232,7 +233,11 @@ $(OUTPUT)test-libpython.bin: $(BUILD) $(FLAGS_PYTHON_EMBED) $(OUTPUT)test-libbfd.bin: +ifeq ($(ARCH),sw_64) + $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz +else $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl +endif $(OUTPUT)test-libbfd-buildid.bin: $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl diff --git a/tools/perf/arch/sw_64/include/perf_regs.h b/tools/perf/arch/sw_64/include/perf_regs.h index 44e064c027011107bdd9311c1fa57527058239a7..e0c1b15375b5c2c7e975df45adeb644d1eed2ccf 100644 --- a/tools/perf/arch/sw_64/include/perf_regs.h +++ b/tools/perf/arch/sw_64/include/perf_regs.h @@ -13,13 +13,75 @@ void perf_regs_load(u64 *regs); #define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64 #define PERF_REG_IP PERF_REG_SW64_PC -#define PERF_REG_SP PERF_REG_SW64_HAE +#define PERF_REG_SP PERF_REG_SW64_SP static inline const char *perf_reg_name(int id) { switch (id) { case PERF_REG_SW64_R0: return "r0"; + case PERF_REG_SW64_R1: + return "r1"; + case PERF_REG_SW64_R2: + return "r2"; + case PERF_REG_SW64_R3: + return "r3"; + case PERF_REG_SW64_R4: + return "r4"; + case PERF_REG_SW64_R5: + return "r5"; + case PERF_REG_SW64_R6: + return "r6"; + case PERF_REG_SW64_R7: + return "r7"; + case PERF_REG_SW64_R8: + return "r8"; + case PERF_REG_SW64_R9: + return "r9"; + case PERF_REG_SW64_R10: + return "r10"; + case PERF_REG_SW64_R11: + return "r11"; + case PERF_REG_SW64_R12: + return "r12"; + case PERF_REG_SW64_R13: + return "r13"; + case PERF_REG_SW64_R14: + return "r14"; + case PERF_REG_SW64_R15: + return "r15"; + case PERF_REG_SW64_R16: + return "r16"; + case PERF_REG_SW64_R17: + return "r17"; + case PERF_REG_SW64_R18: + return "r18"; + case PERF_REG_SW64_R19: + return "r19"; + case PERF_REG_SW64_R20: + return "r20"; + case PERF_REG_SW64_R21: + return "r21"; + case PERF_REG_SW64_R22: + return "r22"; + case PERF_REG_SW64_R23: + return "r23"; + case PERF_REG_SW64_R24: + return "r24"; + case PERF_REG_SW64_R25: + return "r25"; + case PERF_REG_SW64_R26: + return "r26"; + case PERF_REG_SW64_R27: + return "r27"; + case PERF_REG_SW64_R28: + return "r28"; + case PERF_REG_SW64_GP: + return "gp"; + case PERF_REG_SW64_SP: + return "sp"; + case PERF_REG_SW64_PC: + return "pc"; default: return NULL; } diff --git a/tools/perf/arch/sw_64/tests/dwarf-unwind.c b/tools/perf/arch/sw_64/tests/dwarf-unwind.c index fae50af5413fd29fda83694287ed63b405e9d602..49488b89352cb5796e81877ee58ee1f2e1a7e69b 100644 --- a/tools/perf/arch/sw_64/tests/dwarf-unwind.c +++ b/tools/perf/arch/sw_64/tests/dwarf-unwind.c @@ -25,7 +25,7 @@ static int sample_ustack(struct perf_sample *sample, return -1; } - sp = (unsigned long) regs[30]; + sp = (unsigned long) regs[PERF_REG_SW64_SP]; map = maps__find(thread->maps, (u64)sp); if (!map) { diff --git a/tools/perf/arch/sw_64/tests/regs_load.S b/tools/perf/arch/sw_64/tests/regs_load.S index 
75da34b7b843c8e8bcf8186f9588e807f18869ce..8c5aabc2c6fbde125064ecbcf85787bdd09cbcaf 100644 --- a/tools/perf/arch/sw_64/tests/regs_load.S +++ b/tools/perf/arch/sw_64/tests/regs_load.S @@ -4,35 +4,44 @@ .text .set noat .type perf_regs_load,%function +#define STL_REG(r) stl $r, (8 * r)($16) +#define LDL_REG(r) ldl $r, (8 * r)($16) +#define SP (8 * 30) +#define PC (8 * 31) SYM_FUNC_START(perf_regs_load) - stl $0, 0x0($16); - stl $1, 0x8($16); - stl $2, 0x10($16); - stl $3, 0x18($16); - stl $4, 0x20($16); - stl $5, 0x28($16); - stl $6, 0x30($16); - stl $7, 0x38($16); - stl $8, 0x40($16); - stl $19, 0x48($16); - stl $20, 0x50($16); - stl $21, 0x58($16); - stl $22, 0x60($16); - stl $23, 0x68($16); - stl $24, 0x70($16); - stl $25, 0x78($16); - stl $26, 0x80($16); - stl $27, 0x88($16); - stl $28, 0x90($16); - stl $30, 0x98($16); - stl $20, 0xa0($16); - stl $21, 0xa8($16); - stl $22, 0xb0($16); - stl $23, 0xb8($16); - stl $26, 0xc0($16); - stl $29, 0xc8($16); - stl $16, 0xd0($16); - stl $17, 0xd8($16); - stl $18, 0xe0($16); + STL_REG(0) + STL_REG(1) + STL_REG(2) + STL_REG(3) + STL_REG(4) + STL_REG(5) + STL_REG(6) + STL_REG(7) + STL_REG(8) + STL_REG(9) + STL_REG(10) + STL_REG(11) + STL_REG(12) + STL_REG(13) + STL_REG(14) + STL_REG(15) + STL_REG(16) + STL_REG(17) + STL_REG(18) + STL_REG(19) + STL_REG(20) + STL_REG(21) + STL_REG(22) + STL_REG(23) + STL_REG(24) + STL_REG(25) + STL_REG(26) + STL_REG(27) + STL_REG(28) + STL_REG(29) + mov $30, $17 + stl $17, (SP)($16) + stl $26, (PC)($16) + LDL_REG(17) ret SYM_FUNC_END(perf_regs_load) diff --git a/tools/perf/arch/sw_64/util/dwarf-regs.c b/tools/perf/arch/sw_64/util/dwarf-regs.c index b9471d8f47d27bddfbcae97ca80044f366928af1..1cebe3bb75a9578922f9782032b5afa39254c2e3 100644 --- a/tools/perf/arch/sw_64/util/dwarf-regs.c +++ b/tools/perf/arch/sw_64/util/dwarf-regs.c @@ -23,41 +23,45 @@ struct pt_regs_dwarfnum { }; #define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num} +#define GPR_DWARFNUM_NAME(num) \ + {.name = __stringify(%x##num), .dwarfnum = num} #define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0} +#define DWARFNUM2OFFSET(index) \ + (index * sizeof((struct user_pt_regs *)0)->regs[0]) static const struct pt_regs_dwarfnum regdwarfnum_table[] = { - REG_DWARFNUM_NAME("%v0", 0), - REG_DWARFNUM_NAME("%t0", 1), - REG_DWARFNUM_NAME("%t1", 2), - REG_DWARFNUM_NAME("%t2", 3), - REG_DWARFNUM_NAME("%t3", 4), - REG_DWARFNUM_NAME("%t4", 5), - REG_DWARFNUM_NAME("%t5", 6), - REG_DWARFNUM_NAME("%t6", 7), - REG_DWARFNUM_NAME("%t7", 8), - REG_DWARFNUM_NAME("%s0", 9), - REG_DWARFNUM_NAME("%s1", 10), - REG_DWARFNUM_NAME("%s2", 11), - REG_DWARFNUM_NAME("%s3", 12), - REG_DWARFNUM_NAME("%s4", 13), - REG_DWARFNUM_NAME("%s5", 14), - REG_DWARFNUM_NAME("%s6", 15), - REG_DWARFNUM_NAME("%a0", 16), - REG_DWARFNUM_NAME("%a1", 17), - REG_DWARFNUM_NAME("%a2", 18), - REG_DWARFNUM_NAME("%a3", 19), - REG_DWARFNUM_NAME("%a4", 20), - REG_DWARFNUM_NAME("%a5", 21), - REG_DWARFNUM_NAME("%t8", 22), - REG_DWARFNUM_NAME("%t9", 23), - REG_DWARFNUM_NAME("%t10", 24), - REG_DWARFNUM_NAME("%t11", 25), - REG_DWARFNUM_NAME("%ra", 26), - REG_DWARFNUM_NAME("%pv", 27), - REG_DWARFNUM_NAME("%at", 28), + GPR_DWARFNUM_NAME(0), + GPR_DWARFNUM_NAME(1), + GPR_DWARFNUM_NAME(2), + GPR_DWARFNUM_NAME(3), + GPR_DWARFNUM_NAME(4), + GPR_DWARFNUM_NAME(5), + GPR_DWARFNUM_NAME(6), + GPR_DWARFNUM_NAME(7), + GPR_DWARFNUM_NAME(8), + GPR_DWARFNUM_NAME(9), + GPR_DWARFNUM_NAME(10), + GPR_DWARFNUM_NAME(11), + GPR_DWARFNUM_NAME(12), + GPR_DWARFNUM_NAME(13), + GPR_DWARFNUM_NAME(14), + GPR_DWARFNUM_NAME(15), + 
REG_DWARFNUM_NAME("%fp", 15), + GPR_DWARFNUM_NAME(16), + GPR_DWARFNUM_NAME(17), + GPR_DWARFNUM_NAME(18), + GPR_DWARFNUM_NAME(19), + GPR_DWARFNUM_NAME(20), + GPR_DWARFNUM_NAME(21), + GPR_DWARFNUM_NAME(22), + GPR_DWARFNUM_NAME(23), + GPR_DWARFNUM_NAME(24), + GPR_DWARFNUM_NAME(25), + GPR_DWARFNUM_NAME(26), + GPR_DWARFNUM_NAME(27), + GPR_DWARFNUM_NAME(28), REG_DWARFNUM_NAME("%gp", 29), REG_DWARFNUM_NAME("%sp", 30), - REG_DWARFNUM_NAME("%zero", 31), REG_DWARFNUM_END, }; @@ -72,7 +76,6 @@ static const struct pt_regs_dwarfnum regdwarfnum_table[] = { const char *get_arch_regstr(unsigned int n) { const struct pt_regs_dwarfnum *roff; - for (roff = regdwarfnum_table; roff->name != NULL; roff++) if (roff->dwarfnum == n) return roff->name; @@ -85,6 +88,6 @@ int regs_query_register_offset(const char *name) for (roff = regdwarfnum_table; roff->name != NULL; roff++) if (!strcmp(roff->name, name)) - return roff->dwarfnum; + return DWARFNUM2OFFSET(roff->dwarfnum); return -EINVAL; } diff --git a/tools/perf/arch/sw_64/util/unwind-libunwind.c b/tools/perf/arch/sw_64/util/unwind-libunwind.c index e1dc380610bc1d45d9fa1fb3af18547a32187eb0..134e3c2280d297b3374ee4d7a6c471bc4d923b14 100644 --- a/tools/perf/arch/sw_64/util/unwind-libunwind.c +++ b/tools/perf/arch/sw_64/util/unwind-libunwind.c @@ -11,10 +11,68 @@ int LIBUNWIND__ARCH_REG_ID(int regnum) { switch (regnum) { + case UNW_SW_64_R0: + return PERF_REG_SW64_R0; + case UNW_SW_64_R1: + return PERF_REG_SW64_R1; + case UNW_SW_64_R2: + return PERF_REG_SW64_R2; + case UNW_SW_64_R3: + return PERF_REG_SW64_R3; + case UNW_SW_64_R4: + return PERF_REG_SW64_R4; + case UNW_SW_64_R5: + return PERF_REG_SW64_R5; + case UNW_SW_64_R6: + return PERF_REG_SW64_R6; + case UNW_SW_64_R7: + return PERF_REG_SW64_R7; + case UNW_SW_64_R8: + return PERF_REG_SW64_R8; + case UNW_SW_64_R9: + return PERF_REG_SW64_R9; + case UNW_SW_64_R10: + return PERF_REG_SW64_R10; + case UNW_SW_64_R11: + return PERF_REG_SW64_R11; + case UNW_SW_64_R12: + return PERF_REG_SW64_R12; + case UNW_SW_64_R13: + return PERF_REG_SW64_R13; + case UNW_SW_64_R14: + return PERF_REG_SW64_R14; + case UNW_SW_64_R15: + return PERF_REG_SW64_R15; + case UNW_SW_64_R16: + return PERF_REG_SW64_R16; + case UNW_SW_64_R17: + return PERF_REG_SW64_R17; + case UNW_SW_64_R18: + return PERF_REG_SW64_R18; + case UNW_SW_64_R19: + return PERF_REG_SW64_R19; + case UNW_SW_64_R20: + return PERF_REG_SW64_R20; + case UNW_SW_64_R21: + return PERF_REG_SW64_R21; + case UNW_SW_64_R22: + return PERF_REG_SW64_R22; + case UNW_SW_64_R23: + return PERF_REG_SW64_R23; + case UNW_SW_64_R24: + return PERF_REG_SW64_R24; + case UNW_SW_64_R25: + return PERF_REG_SW64_R25; case UNW_SW_64_R26: return PERF_REG_SW64_R26; + case UNW_SW_64_R27: + return PERF_REG_SW64_R27; + case UNW_SW_64_R28: + return PERF_REG_SW64_R28; + case UNW_SW_64_R29: + return PERF_REG_SW64_GP; case UNW_SW_64_R30: - return PERF_REG_SW64_HAE; + return PERF_REG_SW64_SP; case UNW_SW_64_PC: return PERF_REG_SW64_PC; default: diff --git a/tools/perf/util/libunwind/sw64.c b/tools/perf/util/libunwind/sw64.c index 45125b1034f83e362206b8d862e2604be81feba3..12452bf2ab8b8539a317329969fffa8801e38082 100644 --- a/tools/perf/util/libunwind/sw64.c +++ b/tools/perf/util/libunwind/sw64.c @@ -17,14 +17,14 @@ /* Define arch specific functions & regs for libunwind, should be * defined before including "unwind.h" */ -#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__arm64_reg_id(regnum) +#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__sw_64_reg_id(regnum) #define LIBUNWIND__ARCH_REG_IP PERF_REG_SW64_PC -#define 
diff --git a/tools/perf/util/libunwind/sw64.c b/tools/perf/util/libunwind/sw64.c
index 45125b1034f83e362206b8d862e2604be81feba3..12452bf2ab8b8539a317329969fffa8801e38082 100644
--- a/tools/perf/util/libunwind/sw64.c
+++ b/tools/perf/util/libunwind/sw64.c
@@ -17,14 +17,14 @@
 /* Define arch specific functions & regs for libunwind, should be
  * defined before including "unwind.h"
  */
-#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__arm64_reg_id(regnum)
+#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__sw_64_reg_id(regnum)
 #define LIBUNWIND__ARCH_REG_IP PERF_REG_SW64_PC
-#define LIBUNWIND__ARCH_REG_SP PERF_REG_SW64_HAE
+#define LIBUNWIND__ARCH_REG_SP PERF_REG_SW64_SP
 
 #include "unwind.h"
 #include "debug.h"
 #include "libunwind-sw_64.h"
-#include <../../../../arch/sw_64/include/uapi/asm/perf_regs.h>
+#include <../../../arch/sw_64/include/uapi/asm/perf_regs.h>
 
 #include "../../arch/sw_64/util/unwind-libunwind.c"
 #include "util/unwind-libunwind-local.c"
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4283d24bacae1ce3f0ae04edd3759efca69eb7a6..b2650b45c0b404edbbed9924bbf80ce0d8714a85 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3302,6 +3302,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		if (oldpid)
 			synchronize_rcu();
 		put_pid(oldpid);
+#ifdef CONFIG_SW64
+		vcpu->stat.pid = current->pid;
+#endif
 	}
 	r = kvm_arch_vcpu_ioctl_run(vcpu);
 	trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
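Note (commentary, not part of any hunk): the kvm_main.c change records the pid of the task entering KVM_RUN, presumably so sw_64 arch code can later identify the thread backing a vcpu. The assignment only builds if the sw_64 struct kvm_vcpu_stat carries a pid member; a minimal sketch of what such a definition is assumed to look like (only the pid field is implied by this hunk, everything else here is illustrative):

	/* arch/sw_64/include/asm/kvm_host.h -- sketch, not taken from this patch */
	struct kvm_vcpu_stat {
		u64 pid;	/* task pid last seen entering KVM_RUN */
		/* per-vcpu exit/event counters elided */
	};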